Mirror of https://github.com/akyaiy/GoSally-mvp.git (synced 2026-01-03 15:12:26 +00:00)
Project structure refactor:
- Rename the general_server package to gateway
- Restructure the directories and packages
- Add the vendor directory to the project
vendor/github.com/fsnotify/fsnotify/.cirrus.yml (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
freebsd_task:
  name: 'FreeBSD'
  freebsd_instance:
    image_family: freebsd-14-2
  install_script:
    - pkg update -f
    - pkg install -y go
  test_script:
    # run tests as user "cirrus" instead of root
    - pw useradd cirrus -m
    - chown -R cirrus:cirrus .
    - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
vendor/github.com/fsnotify/fsnotify/.gitignore (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
# go test -c output
*.test
*.test.exe

# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe

/test/kqueue
/test/a.out
vendor/github.com/fsnotify/fsnotify/.mailmap (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md (generated, vendored, new file, 602 lines)
@@ -0,0 +1,602 @@
|
||||
# Changelog
|
||||
|
||||
1.9.0 2025-04-04
|
||||
----------------
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- all: make BufferedWatcher buffered again ([#657])
|
||||
|
||||
- inotify: fix race when adding/removing watches while a watched path is being
|
||||
deleted ([#678], [#686])
|
||||
|
||||
- inotify: don't send empty event if a watched path is unmounted ([#655])
|
||||
|
||||
- inotify: don't register duplicate watches when watching both a symlink and its
|
||||
target; previously that would get "half-added" and removing the second would
|
||||
panic ([#679])
|
||||
|
||||
- kqueue: fix watching relative symlinks ([#681])
|
||||
|
||||
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
|
||||
kqueue ([#682])
|
||||
|
||||
- illumos: don't send error if changed file is deleted while processing the
|
||||
event ([#678])
|
||||
|
||||
|
||||
[#657]: https://github.com/fsnotify/fsnotify/pull/657
|
||||
[#678]: https://github.com/fsnotify/fsnotify/pull/678
|
||||
[#686]: https://github.com/fsnotify/fsnotify/pull/686
|
||||
[#655]: https://github.com/fsnotify/fsnotify/pull/655
|
||||
[#681]: https://github.com/fsnotify/fsnotify/pull/681
|
||||
[#679]: https://github.com/fsnotify/fsnotify/pull/679
|
||||
[#682]: https://github.com/fsnotify/fsnotify/pull/682
|
||||
|
||||
1.8.0 2024-10-31
|
||||
----------------
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
|
||||
|
||||
- kqueue: ignore events with Ident=0 ([#590])
|
||||
|
||||
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
|
||||
|
||||
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
|
||||
|
||||
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
|
||||
|
||||
- inotify: fix panic when calling Remove() in a goroutine ([#650])
|
||||
|
||||
- fen: allow watching subdirectories of watched directories ([#621])
|
||||
|
||||
[#590]: https://github.com/fsnotify/fsnotify/pull/590
|
||||
[#610]: https://github.com/fsnotify/fsnotify/pull/610
|
||||
[#617]: https://github.com/fsnotify/fsnotify/pull/617
|
||||
[#619]: https://github.com/fsnotify/fsnotify/pull/619
|
||||
[#620]: https://github.com/fsnotify/fsnotify/pull/620
|
||||
[#621]: https://github.com/fsnotify/fsnotify/pull/621
|
||||
[#625]: https://github.com/fsnotify/fsnotify/pull/625
|
||||
[#650]: https://github.com/fsnotify/fsnotify/pull/650
|
||||
|
||||
1.7.0 - 2023-10-22
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.17.
|
||||
|
||||
### Additions
|
||||
|
||||
- illumos: add FEN backend to support illumos and Solaris. ([#371])
|
||||
|
||||
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
|
||||
in cases where you can't control the kernel buffer and receive a large number
|
||||
of events in bursts. ([#550], [#572])
|
||||
|
||||
- all: add `AddWith()`, which is identical to `Add()` but allows passing
|
||||
options. ([#521])
|
||||
|
||||
- windows: allow setting the ReadDirectoryChangesW() buffer size with
|
||||
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
|
||||
works on all platforms and is enough for most purposes, but in some cases a
|
||||
higher buffer is needed. ([#521]) (A usage sketch follows this list.)
|
||||
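A minimal sketch of how the additions above fit together: `NewBufferedWatcher()` for a buffered Events channel and `AddWith()` in place of `Add()`. The buffer capacity and the `/tmp` path are arbitrary example values, not recommendations from the changelog:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Buffered variant of NewWatcher; useful when events arrive in bursts
	// faster than the consumer drains them.
	w, err := fsnotify.NewBufferedWatcher(500) // example capacity
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; on Windows the ReadDirectoryChangesW
	// buffer can be raised with fsnotify.WithBufferSize (default kept here).
	if err := w.AddWith("/tmp"); err != nil {
		log.Fatal(err)
	}

	for e := range w.Events {
		log.Println(e)
	}
}
```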
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: remove watcher if a watched path is renamed ([#518])
|
||||
|
||||
After a rename the reported name wasn't updated, or was even an empty string.
|
||||
Inotify doesn't provide any good facilities to update it, so just remove the
|
||||
watcher. This is already how it worked on kqueue and FEN.
|
||||
|
||||
On Windows this does work, and remains working.
|
||||
|
||||
- windows: don't listen for file attribute changes ([#520])
|
||||
|
||||
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
|
||||
with no way to see if they're a file write or attribute change, so would show
|
||||
up as a fsnotify.Write event. This is never useful, and could result in many
|
||||
spurious Write events.
|
||||
|
||||
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
|
||||
|
||||
Before it would merely return "short read", making it hard to detect this
|
||||
error.
|
||||
|
||||
- kqueue: make sure events for all files are delivered properly when removing a
|
||||
watched directory ([#526])
|
||||
|
||||
Previously they would get sent with `""` (empty string) or `"."` as the path
|
||||
name.
|
||||
|
||||
- kqueue: don't emit spurious Create events for symbolic links ([#524])
|
||||
|
||||
The link would get resolved but kqueue would "forget" it already saw the link
|
||||
itself, resulting in a Create for every Write event for the directory.
|
||||
|
||||
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
|
||||
|
||||
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
|
||||
`backend_other.go`, making it easier to use on unsupported platforms such as
|
||||
WASM, AIX, etc. ([#528])
|
||||
|
||||
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
|
||||
Google AppEngine forbids usage of the unsafe package so the inotify backend
|
||||
won't compile there.
|
||||
|
||||
[#371]: https://github.com/fsnotify/fsnotify/pull/371
|
||||
[#516]: https://github.com/fsnotify/fsnotify/pull/516
|
||||
[#518]: https://github.com/fsnotify/fsnotify/pull/518
|
||||
[#520]: https://github.com/fsnotify/fsnotify/pull/520
|
||||
[#521]: https://github.com/fsnotify/fsnotify/pull/521
|
||||
[#524]: https://github.com/fsnotify/fsnotify/pull/524
|
||||
[#525]: https://github.com/fsnotify/fsnotify/pull/525
|
||||
[#526]: https://github.com/fsnotify/fsnotify/pull/526
|
||||
[#528]: https://github.com/fsnotify/fsnotify/pull/528
|
||||
[#537]: https://github.com/fsnotify/fsnotify/pull/537
|
||||
[#550]: https://github.com/fsnotify/fsnotify/pull/550
|
||||
[#572]: https://github.com/fsnotify/fsnotify/pull/572
|
||||
|
||||
1.6.0 - 2022-10-13
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `Event.Has()` and `Op.Has()` ([#477])
|
||||
|
||||
This makes checking events a lot easier; for example:
|
||||
|
||||
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
}
|
||||
|
||||
Becomes:
|
||||
|
||||
if event.Has(Write) && !event.Has(Remove) {
|
||||
}
|
||||
|
||||
- all: add cmd/fsnotify ([#463])
|
||||
|
||||
A command-line utility for testing and some examples.
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: don't ignore events for files that don't exist ([#260], [#470])
|
||||
|
||||
Previously the inotify watcher would call `os.Lstat()` to check if a file
|
||||
still exists before emitting events.
|
||||
|
||||
This was inconsistent with other platforms and resulted in inconsistent event
|
||||
reporting (e.g. when a file is quickly removed and re-created), and generally
|
||||
a source of confusion. It was added in 2013 to fix a memory leak that no
|
||||
longer exists.
|
||||
|
||||
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
|
||||
not watched ([#460])
|
||||
|
||||
- inotify: replace epoll() with non-blocking inotify ([#434])
|
||||
|
||||
Non-blocking inotify was not generally available at the time this library was
|
||||
written in 2014, but now it is. As a result, the minimum Linux version is
|
||||
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
|
||||
|
||||
- kqueue: don't check for events every 100ms ([#480])
|
||||
|
||||
The watcher would wake up every 100ms, even when there was nothing to do. Now
|
||||
it waits until there is something to do.
|
||||
|
||||
- macos: retry opening files on EINTR ([#475])
|
||||
|
||||
- kqueue: skip unreadable files ([#479])
|
||||
|
||||
kqueue requires a file descriptor for every file in a directory; this would
|
||||
fail if a file was unreadable by the current user. Now these files are simply
|
||||
skipped.
|
||||
|
||||
- windows: fix renaming a watched directory if the parent is also watched ([#370])
|
||||
|
||||
- windows: increase buffer size from 4K to 64K ([#485])
|
||||
|
||||
- windows: close file handle on Remove() ([#288])
|
||||
|
||||
- kqueue: put pathname in the error if watching a file fails ([#471])
|
||||
|
||||
- inotify, windows: calling Close() more than once could race ([#465])
|
||||
|
||||
- kqueue: improve Close() performance ([#233])
|
||||
|
||||
- all: various documentation additions and clarifications.
|
||||
|
||||
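A short, hedged sketch of checking the sentinel errors mentioned in these notes: `ErrNonExistentWatch` (returned by `Remove()` on an unwatched path, per 1.6.0) and `ErrClosed` (returned by `Add()` after `Close()`, per the 1.7.0 notes above). The `/tmp` path is only an example:

```go
package main

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}

	// Remove() on a path that was never added.
	if err := w.Remove("/tmp"); errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Println("nothing was watching /tmp")
	}

	// Add() after the watcher has been closed.
	w.Close()
	if err := w.Add("/tmp"); errors.Is(err, fsnotify.ErrClosed) {
		log.Println("watcher is already closed")
	}
}
```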
[#233]: https://github.com/fsnotify/fsnotify/pull/233
|
||||
[#260]: https://github.com/fsnotify/fsnotify/pull/260
|
||||
[#288]: https://github.com/fsnotify/fsnotify/pull/288
|
||||
[#370]: https://github.com/fsnotify/fsnotify/pull/370
|
||||
[#434]: https://github.com/fsnotify/fsnotify/pull/434
|
||||
[#460]: https://github.com/fsnotify/fsnotify/pull/460
|
||||
[#463]: https://github.com/fsnotify/fsnotify/pull/463
|
||||
[#465]: https://github.com/fsnotify/fsnotify/pull/465
|
||||
[#470]: https://github.com/fsnotify/fsnotify/pull/470
|
||||
[#471]: https://github.com/fsnotify/fsnotify/pull/471
|
||||
[#475]: https://github.com/fsnotify/fsnotify/pull/475
|
||||
[#477]: https://github.com/fsnotify/fsnotify/pull/477
|
||||
[#479]: https://github.com/fsnotify/fsnotify/pull/479
|
||||
[#480]: https://github.com/fsnotify/fsnotify/pull/480
|
||||
[#485]: https://github.com/fsnotify/fsnotify/pull/485
|
||||
|
||||
## [1.5.4] - 2022-04-25
|
||||
|
||||
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
||||
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
|
||||
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
|
||||
|
||||
## [1.5.3] - 2022-04-22
|
||||
|
||||
* This version is retracted. An incorrect branch was published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
|
||||
|
||||
## [1.5.2] - 2022-04-21
|
||||
|
||||
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
|
||||
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
|
||||
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
|
||||
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
|
||||
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
|
||||
|
||||
## [1.5.1] - 2021-08-24
|
||||
|
||||
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
|
||||
|
||||
## [1.5.0] - 2021-08-20
|
||||
|
||||
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
|
||||
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
|
||||
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
|
||||
[#378](https://github.com/fsnotify/fsnotify/pull/378)
|
||||
[#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
||||
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
||||
|
||||
## [1.4.9] - 2020-03-11
|
||||
|
||||
* Move example usage to the readme #329. This may resolve #328.
|
||||
|
||||
## [1.4.8] - 2020-03-10
|
||||
|
||||
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
|
||||
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
|
||||
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
|
||||
* CI: Less verbosity (@nathany #267)
|
||||
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
|
||||
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
|
||||
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
|
||||
* CI: Add windows to travis matrix (@cpuguy83 #284)
|
||||
* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
|
||||
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
|
||||
* Linux: open files with close-on-exec (@linxiulei #273)
|
||||
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
|
||||
* Project: Add go.mod (@nathany #309)
|
||||
* Project: Revise editor config (@nathany #309)
|
||||
* Project: Update copyright for 2019 (@nathany #309)
|
||||
* CI: Drop go1.8 from CI matrix (@nathany #309)
|
||||
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
|
||||
|
||||
## [1.4.7] - 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||
* Docs: replace references to OS X with macOS
|
||||
|
||||
## [1.4.2] - 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## [1.4.1] - 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## [1.4.0] - 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## [1.3.1] - 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## [1.3.0] - 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## [1.2.10] - 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## [1.2.9] - 2016-01-13
|
||||
|
||||
* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## [1.2.8] - 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## [1.2.5] - 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## [1.2.1] - 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## [1.2.0] - 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## [1.1.1] - 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## [1.1.0] - 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [1.0.4] - 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## [1.0.3] - 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## [1.0.2] - 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## [1.0.0] - 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## [0.9.3] - 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [0.9.2] - 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## [0.9.1] - 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## [0.9.0] - 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## [0.8.12] - 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## [0.8.11] - 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## [0.8.10] - 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## [0.8.9] - 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## [0.8.8] - 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## [0.8.7] - 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## [0.8.6] - 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## [0.8.5] - 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## [0.8.4] - 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## [0.8.3] - 2013-03-13
|
||||
|
||||
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## [0.8.2] - 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## [0.8.1] - 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## [0.8.0] - 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## [0.7.4] - 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## [0.7.3] - 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## [0.7.2] - 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## [0.7.1] - 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## [0.7.0] - 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## [0.6.0] - 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## [0.5.1] - 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## [0.5.0] - 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## [0.4.0] - 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## [0.3.0] - 2012-02-19
|
||||
|
||||
* kqueue: add files when watching a directory
|
||||
|
||||
## [0.2.0] - 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## [0.1.0] - 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md (generated, vendored, new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
Thank you for your interest in contributing to fsnotify! We try to review and
|
||||
merge PRs in a reasonable timeframe, but please be aware that:
|
||||
|
||||
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
|
||||
can just send PRs, but they may end up being rejected for one reason or the
|
||||
other.
|
||||
|
||||
- fsnotify is a cross-platform library, and changes must work reasonably well on
|
||||
all supported platforms.
|
||||
|
||||
- Changes will need to be compatible; old code should still compile, and the
|
||||
runtime behaviour can't change in ways that are likely to lead to problems for
|
||||
users.
|
||||
|
||||
Testing
|
||||
-------
|
||||
Just `go test ./...` runs all the tests; the CI runs this on all supported
|
||||
platforms. Testing different platforms locally can be done with something like
|
||||
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
|
||||
|
||||
Use the `-short` flag to make the "stress test" run faster.
|
||||
|
||||
Writing new tests
|
||||
-----------------
|
||||
Scripts in the testdata directory allow creating test cases in a "shell-like"
|
||||
syntax. The basic format is:
|
||||
|
||||
script
|
||||
|
||||
Output:
|
||||
desired output
|
||||
|
||||
For example:
|
||||
|
||||
# Create a new empty file with some data.
|
||||
watch /
|
||||
echo data >/file
|
||||
|
||||
Output:
|
||||
create /file
|
||||
write /file
|
||||
|
||||
Just create a new file to add a new test; select which tests to run with
|
||||
`-run TestScript/[path]`.
|
||||
|
||||
script
|
||||
------
|
||||
The script is a "shell-like" script:
|
||||
|
||||
cmd arg arg
|
||||
|
||||
Comments are supported with `#`:
|
||||
|
||||
# Comment
|
||||
cmd arg arg # Comment
|
||||
|
||||
All operations are done in a temp directory; a path like "/foo" is rewritten to
|
||||
"/tmp/TestFoo/foo".
|
||||
|
||||
Arguments can be quoted with `"` or `'`; there are no escapes and they're
|
||||
functionally identical right now, but this may change in the future, so best to
|
||||
assume shell-like rules.
|
||||
|
||||
touch "/file with spaces"
|
||||
|
||||
End-of-line escapes with `\` are not supported.
|
||||
|
||||
### Supported commands
|
||||
|
||||
watch path [ops] # Watch the path, reporting events for it. Nothing is
|
||||
# watched by default. Optionally a list of ops can be
|
||||
# given, as with AddWith(path, WithOps(...)).
|
||||
unwatch path # Stop watching the path.
|
||||
watchlist n # Assert watchlist length.
|
||||
|
||||
stop # Stop running the script; for debugging.
|
||||
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
|
||||
parallel by default, so -parallel=1 is probably a good
|
||||
idea).
|
||||
print [any strings] # Print text to stdout; for debugging.
|
||||
|
||||
touch path
|
||||
mkdir [-p] dir
|
||||
ln -s target link # Only ln -s supported.
|
||||
mkfifo path
|
||||
mknod dev path
|
||||
mv src dst
|
||||
rm [-r] path
|
||||
chmod mode path # Octal only
|
||||
sleep time-in-ms
|
||||
|
||||
cat path # Read path (does nothing with the data; just reads it).
|
||||
echo str >>path # Append "str" to "path".
|
||||
echo str >path # Truncate "path" and write "str".
|
||||
|
||||
require reason # Skip the test if "reason" is true; "skip" and
|
||||
skip reason # "require" behave identically; it supports both for
|
||||
# readability. Possible reasons are:
|
||||
#
|
||||
# always Always skip this test.
|
||||
# symlink Symlinks are supported (requires admin
|
||||
# permissions on Windows).
|
||||
# mkfifo Platform doesn't support FIFO named sockets.
|
||||
# mknod Platform doesn't support device nodes.
|
||||
|
||||
|
||||
output
|
||||
------
|
||||
After `Output:` the desired output is given; this is indented by convention, but
|
||||
that's not required.
|
||||
|
||||
The format of that is:
|
||||
|
||||
# Comment
|
||||
event path # Comment
|
||||
|
||||
system:
|
||||
event path
|
||||
system2:
|
||||
event path
|
||||
|
||||
Every event is one line, and any whitespace between the event and path is
|
||||
ignored. The path can optionally be surrounded in ". Anything after a "#" is
|
||||
ignored.
|
||||
|
||||
Platform-specific tests can be added after GOOS; for example:
|
||||
|
||||
watch /
|
||||
touch /file
|
||||
|
||||
Output:
|
||||
# Tested if nothing else matches
|
||||
create /file
|
||||
|
||||
# Windows-specific test.
|
||||
windows:
|
||||
write /file
|
||||
|
||||
You can specify multiple platforms with a comma (e.g. "windows, linux:").
|
||||
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
|
||||
|
||||
|
||||
[goon]: https://github.com/arp242/goon
|
||||
[Vagrant]: https://www.vagrantup.com/
|
||||
[integration_test.go]: /integration_test.go
|
||||
vendor/github.com/fsnotify/fsnotify/LICENSE (generated, vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
Copyright © 2012 The Go Authors. All rights reserved.
Copyright © fsnotify Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.
* Neither the name of Google Inc. nor the names of its contributors may be used
  to endorse or promote products derived from this software without specific
  prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/fsnotify/fsnotify/README.md (generated, vendored, new file, 182 lines)
@@ -0,0 +1,182 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
Windows, Linux, macOS, BSD, and illumos.

Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify

---

Platform support:

| Backend | OS | Status |
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
| inotify | Linux | Supported |
| kqueue | BSD, macOS | Supported |
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |

Linux and illumos should include Android and Solaris, but these are currently
untested.

[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847

Usage
-----
A basic example:
```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Create new watcher.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Start listening for events.
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
				if event.Has(fsnotify.Write) {
					log.Println("modified file:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	// Add a path.
	err = watcher.Add("/tmp")
	if err != nil {
		log.Fatal(err)
	}

	// Block main goroutine forever.
	<-make(chan struct{})
}
```
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
run with:

    % go run ./cmd/fsnotify

Further detailed documentation can be found in godoc:
https://pkg.go.dev/github.com/fsnotify/fsnotify

FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.

### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).

[#18]: https://github.com/fsnotify/fsnotify/issues/18

### Do I have to watch the Error and Event channels in a goroutine?
Yes. You can read both channels in the same goroutine using `select` (you don't
need a separate goroutine for both channels; see the example).

### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from the underlying OS to work. The current NFS and SMB
protocols do not provide network-level support for file notifications, and
neither do the /proc and /sys virtual filesystems.

This could be fixed with a polling watcher ([#9]), but it's not yet implemented.

[#9]: https://github.com/fsnotify/fsnotify/issues/9

### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.

Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).

[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
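A hedged sketch of the "ignore Chmod" advice applied to the event loop from the usage example above; the `/tmp` path is an arbitrary example:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	go func() {
		for err := range w.Errors {
			log.Println("error:", err)
		}
	}()

	for e := range w.Events {
		// Drop events that are only attribute changes (Spotlight, anti-virus,
		// and backup tools generate a lot of these).
		if e.Op == fsnotify.Chmod {
			continue
		}
		log.Println(e)
	}
}
```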
### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: they write to
a temporary file which is then moved to the destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
no longer exists.

The upshot of this is that a power failure or crash won't leave a half-written
file.

Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
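A hedged sketch of that approach: watch the containing directory and compare `Event.Name` against the one file you care about. The path is a made-up example; `cmd/fsnotify/file.go` in the repository is the canonical version:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	const target = "/tmp/config.yaml" // example file to follow

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file, so atomic-save renames keep working.
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for e := range w.Events {
		if filepath.Clean(e.Name) != target {
			continue // some other file in the same directory
		}
		if e.Has(fsnotify.Write) || e.Has(fsnotify.Create) {
			log.Println("changed:", e.Name)
		}
	}
}
```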
Platform-specific notes
-----------------------
### Linux
When a file is removed a REMOVE event won't be emitted until all file
descriptors are closed; it will emit a CHMOD instead:

    fp, _ := os.Open("file")
    os.Remove("file")    // CHMOD
    fp.Close()           // REMOVE

This is the event that inotify sends, so not much can be changed about this.

The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
the number of watches per user, and `fs.inotify.max_user_instances` specifies
the maximum number of inotify instances per user. Every Watcher you create is an
"instance", and every path you add is a "watch".

These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
`/proc/sys/fs/inotify/max_user_instances`.

To increase them you can use `sysctl` or write the value to the `/proc` file:

    # The default values on Linux 5.18
    sysctl fs.inotify.max_user_watches=124983
    sysctl fs.inotify.max_user_instances=128

To make the changes persist on reboot edit `/etc/sysctl.conf` or
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
distro's documentation):

    fs.inotify.max_user_watches=124983
    fs.inotify.max_user_instances=128

Reaching the limit will result in a "no space left on device" or "too many open
files" error.
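When that limit is hit, `Add()` fails; a hedged sketch of detecting it on Linux follows. Mapping the "no space left on device" message to `ENOSPC` follows inotify(7); the `/tmp` path is an example:

```go
package main

import (
	"errors"
	"log"
	"syscall"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		// inotify_add_watch reports ENOSPC once fs.inotify.max_user_watches
		// is exhausted.
		if errors.Is(err, syscall.ENOSPC) {
			log.Fatal("inotify watch limit reached; raise fs.inotify.max_user_watches")
		}
		log.Fatal(err)
	}
}
```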
### kqueue (macOS, all BSD systems)
kqueue requires opening a file descriptor for every file that's being watched;
so if you're watching a directory with five files then that's six file
descriptors. You will run into your system's "max open files" limit faster on
these platforms.

The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
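Because each watched file costs a descriptor on kqueue platforms, it can be worth checking the process's descriptor limit before watching a large tree. A hedged sketch using the standard library's `syscall.Getrlimit` (the build tag below is an assumption about where this would be compiled):

```go
//go:build darwin || freebsd || netbsd || openbsd || dragonfly

package main

import (
	"log"
	"syscall"
)

func main() {
	// RLIMIT_NOFILE caps open file descriptors per process, which bounds how
	// many files a kqueue-based watcher can hold open at once.
	var rl syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatal(err)
	}
	log.Printf("open-file limit: current=%d max=%d", rl.Cur, rl.Max)
}
```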
vendor/github.com/fsnotify/fsnotify/backend_fen.go (generated, vendored, new file, 467 lines)
@@ -0,0 +1,467 @@
|
||||
//go:build solaris
|
||||
|
||||
// FEN backend for illumos (supported) and Solaris (untested, but should work).
|
||||
//
|
||||
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fen struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
mu sync.Mutex
|
||||
port *unix.EventPort
|
||||
dirs map[string]Op // Explicitly watched directories
|
||||
watches map[string]Op // Explicitly watched non-directories
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
w := &fen{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
dirs: make(map[string]Op),
|
||||
watches: make(map[string]Op),
|
||||
}
|
||||
|
||||
var err error
|
||||
w.port, err = unix.NewEventPort()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *fen) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
return w.port.Close()
|
||||
}
|
||||
|
||||
func (w *fen) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *fen) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
// Currently we resolve symlinks that were explicitly requested to be
|
||||
// watched. Otherwise we would use LStat here.
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Associate all files in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, true, w.associateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.dirs[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.associateFile(name, stat, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if !w.port.PathIsWatched(name) {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
// The user has expressed an intent. Immediately remove this name from
|
||||
// whichever watch list it might be in. If it's not in there the delete
|
||||
// doesn't cause harm.
|
||||
w.mu.Lock()
|
||||
delete(w.watches, name)
|
||||
delete(w.dirs, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove associations for every file in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, false, w.dissociateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.port.DissociatePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||
func (w *fen) readEvents() {
|
||||
// If this function returns, the watcher has been closed and we can close
|
||||
// these channels
|
||||
defer func() {
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
pevents := make([]unix.PortEvent, 8)
|
||||
for {
|
||||
count, err := w.port.Get(pevents, 1, nil)
|
||||
if err != nil && err != unix.ETIME {
|
||||
// Interrupted system call (count should be 0) ignore and continue
|
||||
if errors.Is(err, unix.EINTR) && count == 0 {
|
||||
continue
|
||||
}
|
||||
// Get failed because we called w.Close()
|
||||
if errors.Is(err, unix.EBADF) && w.isClosed() {
|
||||
return
|
||||
}
|
||||
// There was an error not caused by calling w.Close()
|
||||
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p := pevents[:count]
|
||||
for _, pevent := range p {
|
||||
if pevent.Source != unix.PORT_SOURCE_FILE {
|
||||
// Event from unexpected source received; should never happen.
|
||||
if !w.sendError(errors.New("Event from unexpected source received")) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(pevent.Path, pevent.Events)
|
||||
}
|
||||
|
||||
err = w.handleEvent(&pevent)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle all children of the directory.
|
||||
for _, entry := range files {
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// And finally handle the directory itself.
|
||||
return handler(path, stat, follow)
|
||||
}
|
||||
|
||||
// handleEvent might need to emit more than one fsnotify event if the events
|
||||
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||
// had the attributes changed between when the association was created and
// when the event was returned)
|
||||
func (w *fen) handleEvent(event *unix.PortEvent) error {
|
||||
var (
|
||||
events = event.Events
|
||||
path = event.Path
|
||||
fmode = event.Cookie.(os.FileMode)
|
||||
reRegister = true
|
||||
)
|
||||
|
||||
w.mu.Lock()
|
||||
_, watchedDir := w.dirs[path]
|
||||
_, watchedPath := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
isWatched := watchedDir || watchedPath
|
||||
|
||||
if events&unix.FILE_DELETE != 0 {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_FROM != 0 {
|
||||
if !w.sendEvent(Event{Name: path, Op: Rename}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the new file name
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_TO != 0 {
|
||||
// We don't report a Rename event for this case, because Rename events
|
||||
// are interpreted as referring to the _old_ name of the file, and in
|
||||
// this case the event would refer to the new name of the file. This
|
||||
// type of rename event is not supported by fsnotify.
|
||||
|
||||
// inotify reports a Remove event in this case, so we simulate this
|
||||
// here.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the file that was removed
|
||||
reRegister = false
|
||||
}
|
||||
|
||||
// The file is gone, nothing left to do.
|
||||
if !reRegister {
|
||||
if watchedDir {
|
||||
w.mu.Lock()
|
||||
delete(w.dirs, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
if watchedPath {
|
||||
w.mu.Lock()
|
||||
delete(w.watches, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we didn't get a deletion the file still exists and we're going to have
|
||||
// to watch it again. Let's Stat it now so that we can compare permissions
|
||||
// and have what we need to continue watching the file
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
// This is unexpected, but we should still emit an event. This happens
|
||||
// most often on "rm -r" of a subdirectory inside a watched directory. We
|
||||
// get a modify event of something happening inside, but by the time we
|
||||
// get here, the subdirectory is already gone. Clearly we were watching
|
||||
// this path but now it is gone. Let's tell the user that it was
|
||||
// removed.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Suppress extra write events on removed directories; they are not
|
||||
// informative and can be confusing.
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolve symlinks that were explicitly watched as we would have at Add()
|
||||
// time. this helps suppress spurious Chmod events on watched symlinks
|
||||
if isWatched {
|
||||
stat, err = os.Stat(path)
|
||||
if err != nil {
|
||||
// The symlink still exists, but the target is gone. Report the
|
||||
// Remove similar to above.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't return the error
|
||||
}
|
||||
}
|
||||
|
||||
if events&unix.FILE_MODIFIED != 0 {
|
||||
if fmode.IsDir() && watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(Event{Name: path, Op: Write}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||
// Only send Chmod if perms changed
|
||||
if stat.Mode().Perm() != fmode.Perm() {
|
||||
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stat != nil {
|
||||
// If we get here, it means we've hit an event above that requires us to
|
||||
// continue watching the file or directory
|
||||
err := w.associateFile(path, stat, isWatched)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// Path may have been removed since the stat.
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen, as
|
||||
// everything else should still be watched.
|
||||
func (w *fen) updateDirectory(path string) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
// Directory no longer exists: probably just deleted since we got the
|
||||
// event.
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range files {
|
||||
path := filepath.Join(path, entry.Name())
|
||||
if w.port.PathIsWatched(path) {
|
||||
continue
|
||||
}
|
||||
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.associateFile(path, finfo, false)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// File may have disappeared between getting the dir listing and
|
||||
// adding the port: that's okay to ignore.
|
||||
continue
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
// This is primarily protecting the call to AssociatePath but it is
|
||||
// important and intentional that the call to PathIsWatched is also
|
||||
// protected by this mutex. Without this mutex, AssociatePath has been seen
|
||||
// to error out that the path is already associated.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.port.PathIsWatched(path) {
|
||||
// Remove the old association in favor of this one. If we get ENOENT,
|
||||
// then while the x/sys/unix wrapper still thought that this path was
|
||||
// associated, the underlying event port did not. This call will have
|
||||
// cleared up that discrepancy. The most likely cause is that the event
|
||||
// has fired but we haven't processed it yet.
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil && !errors.Is(err, unix.ENOENT) {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
var events int
|
||||
if !follow {
|
||||
// Watch symlinks themselves rather than their targets unless this entry
|
||||
// is explicitly watched.
|
||||
events |= unix.FILE_NOFOLLOW
|
||||
}
|
||||
if true { // TODO: implement withOps()
|
||||
events |= unix.FILE_MODIFIED
|
||||
}
|
||||
if true {
|
||||
events |= unix.FILE_ATTRIB
|
||||
}
|
||||
err := w.port.AssociatePath(path, stat, events, stat.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
if !w.port.PathIsWatched(path) {
|
||||
return nil
|
||||
}
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches)+len(w.dirs))
|
||||
for pathname := range w.dirs {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
func (w *fen) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
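The FEN backend ends here. One usage note before the next backend file: whichever backend a platform compiles in, callers only see the exported Watcher API. A minimal sketch of a consumer, under the assumption that it imports the vendored module path; the watched path below is a placeholder:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher selects the platform backend via the build tags in these vendored files.
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Placeholder path; replace with the directory the application actually watches.
	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // Events is closed when the watcher is closed
			}
			log.Printf("event: %s %s", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		}
	}
}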
583
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
Normal file
@@ -0,0 +1,583 @@
|
||||
//go:build linux && !appengine
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type inotify struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||
fd int
|
||||
inotifyFile *os.File
|
||||
watches *watches
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
|
||||
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||
// between the two MOVED_* events (including other MOVED_* ones).
|
||||
//
|
||||
// A second issue is that moving a file outside the watched directory will
|
||||
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||
//
|
||||
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||
// Ten items should be more than enough for our purpose, and a loop over
|
||||
// such a short array is faster than a map access anyway (not that it hugely
|
||||
// matters since we're talking about hundreds of ns at the most, but still).
|
||||
cookies [10]koekje
|
||||
cookieIndex uint8
|
||||
cookiesMu sync.Mutex
|
||||
}
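// An illustrative sketch, not part of the upstream file: the cookies array
// above is in effect a tiny fixed-size ring buffer acting as an LRU cache.
// The same idea in isolation, with hypothetical names (it reuses the koekje
// type and the sync import already present in this file):
type cookieRing struct {
	mu    sync.Mutex
	items [10]koekje // oldest entries are overwritten as the index wraps
	next  uint8
}

func (c *cookieRing) put(cookie uint32, path string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[c.next] = koekje{cookie: cookie, path: path}
	c.next = (c.next + 1) % uint8(len(c.items))
}

func (c *cookieRing) get(cookie uint32) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, it := range c.items {
		if it.cookie == cookie {
			return it.path // found the MOVED_FROM path for this cookie
		}
	}
	return "" // evicted, or the matching MOVED_FROM was never seen
}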
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
wd map[uint32]*watch // wd → watch
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
recurse bool // Recursion with ./...?
|
||||
}
|
||||
koekje struct {
|
||||
cookie uint32
|
||||
path string
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[uint32]*watch),
|
||||
path: make(map[string]uint32),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
|
||||
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
|
||||
func (w *watches) len() int { return len(w.wd) }
|
||||
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
|
||||
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
|
||||
|
||||
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||
path, recurse := recursivePath(path)
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||
}
|
||||
|
||||
watch := w.wd[wd]
|
||||
if recurse && !watch.recurse {
|
||||
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
if !watch.recurse {
|
||||
return []uint32{wd}, nil
|
||||
}
|
||||
|
||||
wds := make([]uint32, 0, 8)
|
||||
wds = append(wds, wd)
|
||||
for p, rwd := range w.path {
|
||||
if strings.HasPrefix(p, path) {
|
||||
delete(w.path, p)
|
||||
delete(w.wd, rwd)
|
||||
wds = append(wds, rwd)
|
||||
}
|
||||
}
|
||||
return wds, nil
|
||||
}
|
||||
|
||||
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||
var existing *watch
|
||||
wd, ok := w.path[path]
|
||||
if ok {
|
||||
existing = w.wd[wd]
|
||||
}
|
||||
|
||||
upd, err := f(existing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if upd != nil {
|
||||
w.wd[upd.wd] = upd
|
||||
w.path[upd.path] = upd.wd
|
||||
|
||||
if upd.wd != wd {
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &inotify{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: newWatches(),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *inotify) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
err := w.inotifyFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-w.doneResp // Wait for readEvents() to finish.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), path)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
add := func(path string, with withOpts, recurse bool) error {
|
||||
var flags uint32
|
||||
if with.noFollow {
|
||||
flags |= unix.IN_DONT_FOLLOW
|
||||
}
|
||||
if with.op.Has(Create) {
|
||||
flags |= unix.IN_CREATE
|
||||
}
|
||||
if with.op.Has(Write) {
|
||||
flags |= unix.IN_MODIFY
|
||||
}
|
||||
if with.op.Has(Remove) {
|
||||
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
}
|
||||
if with.op.Has(Rename) {
|
||||
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||
}
|
||||
if with.op.Has(Chmod) {
|
||||
flags |= unix.IN_ATTRIB
|
||||
}
|
||||
if with.op.Has(xUnportableOpen) {
|
||||
flags |= unix.IN_OPEN
|
||||
}
|
||||
if with.op.Has(xUnportableRead) {
|
||||
flags |= unix.IN_ACCESS
|
||||
}
|
||||
if with.op.Has(xUnportableCloseWrite) {
|
||||
flags |= unix.IN_CLOSE_WRITE
|
||||
}
|
||||
if with.op.Has(xUnportableCloseRead) {
|
||||
flags |= unix.IN_CLOSE_NOWRITE
|
||||
}
|
||||
return w.register(path, flags, recurse)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
path, recurse := recursivePath(path)
|
||||
if recurse {
|
||||
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
if root == path {
|
||||
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a Create event when adding a new directory from a recursive
|
||||
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||
// directories will be created before we can set up watchers on the
|
||||
// subdirectories, so only "one" would be sent as a Create event and
|
||||
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||
// problem).
|
||||
if with.sendCreate && root != path {
|
||||
w.sendEvent(Event{Name: root, Op: Create})
|
||||
}
|
||||
|
||||
return add(root, with, true)
|
||||
})
|
||||
}
|
||||
|
||||
return add(path, with, false)
|
||||
}
|
||||
|
||||
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if e, ok := w.watches.wd[uint32(wd)]; ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: path,
|
||||
flags: flags,
|
||||
recurse: recurse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
existing.wd = uint32(wd)
|
||||
existing.flags = flags
|
||||
return existing, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (w *inotify) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.remove(filepath.Clean(name))
|
||||
}
|
||||
|
||||
func (w *inotify) remove(name string) error {
|
||||
wds, err := w.watches.removePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, wd := range wds {
|
||||
_, err := unix.InotifyRmWatch(w.fd, wd)
|
||||
if err != nil {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every
|
||||
// case; the only two possible errors are:
|
||||
//
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of
|
||||
// any kind.
|
||||
//
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is
|
||||
// not a valid watch descriptor. Watch descriptors are invalidated
|
||||
// when they are removed explicitly or implicitly; explicitly by
|
||||
// inotify_rm_watch, implicitly when the file they are watching is
|
||||
// deleted.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *inotify) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
entries := make([]string, 0, w.watches.len())
|
||||
for pathname := range w.watches.path {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *inotify) readEvents() {
|
||||
defer func() {
|
||||
close(w.doneResp)
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
for {
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := w.inotifyFile.Read(buf[:])
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
err := errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
if n == 0 {
|
||||
err = io.EOF // If EOF is received. This should really never happen.
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't know how many events we just read into the buffer; loop while the
|
||||
// offset still points to at least one whole event.
|
||||
var offset uint32
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point to the event in the buffer.
|
||||
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if !w.sendError(ErrEventOverflow) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ev, ok := w.handleEvent(inEvent, &buf, offset)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if !w.sendEvent(ev) {
|
||||
return
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + inEvent.Len
|
||||
}
|
||||
}
|
||||
}
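// An illustrative sketch, not part of the upstream file: the offset loop above
// walks a buffer of variable-length records, each a fixed-size header followed
// by a payload whose length the header stores. The same pattern in isolation,
// using encoding/binary instead of unsafe (assumes a hypothetical 4-byte
// little-endian length header and an "encoding/binary" import):
func walkRecords(buf []byte, fn func(payload []byte)) {
	const hdrSize = 4
	var off uint32
	for off+hdrSize <= uint32(len(buf)) {
		plen := binary.LittleEndian.Uint32(buf[off : off+hdrSize])
		end := off + hdrSize + plen
		if end > uint32(len(buf)) {
			break // truncated record at the end of the buffer
		}
		fn(buf[off+hdrSize : end])
		off = end // advance to the next record's header
	}
}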
|
||||
|
||||
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
/// If the event happened to the watched directory or the watched file, the
|
||||
/// kernel doesn't append the filename to the event, but we would like to
|
||||
/// always fill the "Name" field with a valid filename. We retrieve the
|
||||
/// path of the watch from the "paths" map.
|
||||
///
|
||||
/// Can be nil if Remove() was called in another goroutine for this path
|
||||
/// in between reading the events from the kernel and reading the internal
|
||||
/// state. Not much we can do about it, so just skip. See #616.
|
||||
watch := w.watches.byWd(uint32(inEvent.Wd))
|
||||
if watch == nil {
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
var (
|
||||
name = watch.path
|
||||
nameLen = uint32(inEvent.Len)
|
||||
)
|
||||
if nameLen > 0 {
|
||||
/// Point "bytes" at the first byte of the filename
|
||||
bb := *buf
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
|
||||
}
|
||||
|
||||
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
|
||||
w.watches.remove(watch)
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch)
|
||||
}
|
||||
|
||||
// We can't really update the state when a watched path is moved; only
|
||||
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
|
||||
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
if watch.recurse { // Do nothing
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip if we're watching both this path and the parent; the parent will
|
||||
/// already send a delete so no need to do it twice.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
|
||||
_, ok := w.watches.path[filepath.Dir(watch.path)]
|
||||
if ok {
|
||||
return Event{}, true
|
||||
}
|
||||
}
|
||||
|
||||
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
|
||||
// Need to update watch path for recurse.
|
||||
if watch.recurse {
|
||||
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
|
||||
/// New directory created: set up watch on it.
|
||||
if isDir && ev.Has(Create) {
|
||||
err := w.register(ev.Name, watch.flags, true)
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
|
||||
// This was a directory rename, so we need to update all the
|
||||
// children.
|
||||
//
|
||||
// TODO: this is of course pretty slow; we should use a better data
|
||||
// structure for storing all of this, e.g. store children in the
|
||||
// watch. I have some code for this in my kqueue refactor we can use
|
||||
// in the future. For now I'm okay with this as it's not publicly
|
||||
// available. Correctness first, performance second.
|
||||
if ev.renamedFrom != "" {
|
||||
for k, ww := range w.watches.wd {
|
||||
if k == watch.wd || ww.path == ev.Name {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(ww.path, ev.renamedFrom) {
|
||||
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
|
||||
w.watches.wd[k] = ww
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ev, true
|
||||
}
|
||||
|
||||
func (w *inotify) isRecursive(path string) bool {
|
||||
ww := w.watches.byPath(path)
|
||||
if ww == nil { // path could be a file, so also check the Dir.
|
||||
ww = w.watches.byPath(filepath.Dir(path))
|
||||
}
|
||||
return ww != nil && ww.recurse
|
||||
}
|
||||
|
||||
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_OPEN == unix.IN_OPEN {
|
||||
e.Op |= xUnportableOpen
|
||||
}
|
||||
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
|
||||
e.Op |= xUnportableRead
|
||||
}
|
||||
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
|
||||
e.Op |= xUnportableCloseWrite
|
||||
}
|
||||
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
|
||||
e.Op |= xUnportableCloseRead
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
|
||||
if cookie != 0 {
|
||||
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
w.cookiesMu.Lock()
|
||||
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
|
||||
w.cookieIndex++
|
||||
if w.cookieIndex > 9 {
|
||||
w.cookieIndex = 0
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
w.cookiesMu.Lock()
|
||||
var prev string
|
||||
for _, c := range w.cookies {
|
||||
if c.cookie == cookie {
|
||||
prev = c.path
|
||||
break
|
||||
}
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
e.renamedFrom = prev
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (w *inotify) xSupports(op Op) bool {
|
||||
return true // Supports everything.
|
||||
}
|
||||
|
||||
func (w *inotify) state() {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
for wd, ww := range w.watches.wd {
|
||||
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
|
||||
}
|
||||
}
|
||||
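Before the kqueue file, one practical note on consuming the inotify backend: a single logical save often surfaces as a short burst of Write events (editors truncate, write, then chmod). Consumers typically debounce on their side; a minimal sketch of that, independent of the library internals, with an arbitrary 100ms window:

package main

import (
	"log"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify"
)

// debounce coalesces bursts of events per path into one callback per quiet window.
func debounce(events <-chan fsnotify.Event, window time.Duration, fn func(path string)) {
	var (
		mu      sync.Mutex
		pending = map[string]*time.Timer{}
	)
	for ev := range events {
		mu.Lock()
		if t, ok := pending[ev.Name]; ok {
			t.Stop()
		}
		name := ev.Name
		pending[name] = time.AfterFunc(window, func() {
			mu.Lock()
			delete(pending, name)
			mu.Unlock()
			fn(name)
		})
		mu.Unlock()
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("."); err != nil {
		log.Fatal(err)
	}
	debounce(w.Events, 100*time.Millisecond, func(path string) {
		log.Printf("settled: %s", path)
	})
}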
705
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,705 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type kqueue struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
closepipe [2]int // Pipe used for closing kq.
|
||||
watches *watches
|
||||
}
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
mu sync.RWMutex
|
||||
wd map[int]watch // wd → watch
|
||||
path map[string]int // pathname → wd
|
||||
byDir map[string]map[int]struct{} // dirname(path) → wd
|
||||
seen map[string]struct{} // Keep track of if we know this file exists.
|
||||
byUser map[string]struct{} // Watches added with Watcher.Add()
|
||||
}
|
||||
watch struct {
|
||||
wd int
|
||||
name string
|
||||
linkName string // In case of links; name is the target, and this is the link.
|
||||
isDir bool
|
||||
dirFlags uint32
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[int]watch),
|
||||
path: make(map[string]int),
|
||||
byDir: make(map[string]map[int]struct{}),
|
||||
seen: make(map[string]struct{}),
|
||||
byUser: make(map[string]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) listPaths(userOnly bool) []string {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
if userOnly {
|
||||
l := make([]string, 0, len(w.byUser))
|
||||
for p := range w.byUser {
|
||||
l = append(l, p)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
l := make([]string, 0, len(w.path))
|
||||
for p := range w.path {
|
||||
l = append(l, p)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (w *watches) watchesInDir(path string) []string {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
l := make([]string, 0, 4)
|
||||
for fd := range w.byDir[path] {
|
||||
info := w.wd[fd]
|
||||
if _, ok := w.byUser[info.name]; !ok {
|
||||
l = append(l, info.name)
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Mark path as added by the user.
|
||||
func (w *watches) addUserWatch(path string) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.byUser[path] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) addLink(path string, fd int) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.path[path] = fd
|
||||
w.seen[path] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) add(path, linkPath string, fd int, isDir bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.path[path] = fd
|
||||
w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
byDir, ok := w.byDir[parent]
|
||||
if !ok {
|
||||
byDir = make(map[int]struct{}, 1)
|
||||
w.byDir[parent] = byDir
|
||||
}
|
||||
byDir[fd] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) byWd(fd int) (watch, bool) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
info, ok := w.wd[fd]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) (watch, bool) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
info, ok := w.wd[w.path[path]]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (w *watches) updateDirFlags(path string, flags uint32) bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
fd, ok := w.path[path]
|
||||
if !ok { // Already deleted: don't re-set it here.
|
||||
return false
|
||||
}
|
||||
info := w.wd[fd]
|
||||
info.dirFlags = flags
|
||||
w.wd[fd] = info
|
||||
return true
|
||||
}
|
||||
|
||||
func (w *watches) remove(fd int, path string) bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
isDir := w.wd[fd].isDir
|
||||
delete(w.path, path)
|
||||
delete(w.byUser, path)
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
delete(w.byDir[parent], fd)
|
||||
|
||||
if len(w.byDir[parent]) == 0 {
|
||||
delete(w.byDir, parent)
|
||||
}
|
||||
|
||||
delete(w.wd, fd)
|
||||
delete(w.seen, path)
|
||||
return isDir
|
||||
}
|
||||
|
||||
func (w *watches) markSeen(path string, exists bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if exists {
|
||||
w.seen[path] = struct{}{}
|
||||
} else {
|
||||
delete(w.seen, path)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) seenBefore(path string) bool {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
_, ok := w.seen[path]
|
||||
return ok
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
kq, closepipe, err := newKqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &kqueue{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
kq: kq,
|
||||
closepipe: closepipe,
|
||||
watches: newWatches(),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// newKqueue creates a new kernel event queue and returns a descriptor.
|
||||
//
|
||||
// This registers a new event on closepipe, which will trigger an event when
|
||||
// it's closed. This way we can use kevent() without timeout/polling; without
|
||||
// the closepipe, it would block forever and we wouldn't be able to stop it at
|
||||
// all.
|
||||
func newKqueue() (kq int, closepipe [2]int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if err != nil {
|
||||
return kq, closepipe, err
|
||||
}
|
||||
|
||||
// Register the close pipe.
|
||||
err = unix.Pipe(closepipe[:])
|
||||
if err != nil {
|
||||
unix.Close(kq)
|
||||
return kq, closepipe, err
|
||||
}
|
||||
unix.CloseOnExec(closepipe[0])
|
||||
unix.CloseOnExec(closepipe[1])
|
||||
|
||||
// Register changes to listen on the closepipe.
|
||||
changes := make([]unix.Kevent_t, 1)
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
|
||||
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
|
||||
|
||||
ok, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if ok == -1 {
|
||||
unix.Close(kq)
|
||||
unix.Close(closepipe[0])
|
||||
unix.Close(closepipe[1])
|
||||
return kq, closepipe, err
|
||||
}
|
||||
return kq, closepipe, nil
|
||||
}
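// An illustrative sketch, not part of the upstream file: the closepipe
// registered above is the classic self-pipe trick. A blocking kevent() wait
// can't be interrupted directly, so the waiter also watches the read end of a
// pipe and Close() closes the write end, which wakes the waiter. The same idea
// with a plain pipe and a goroutine:
func selfPipeDemo() error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	done := make(chan struct{})
	go func() {
		defer close(done)
		buf := make([]byte, 1)
		r.Read(buf) // blocks until the write end is closed (or written to)
	}()
	w.Close() // the "quit" signal: the blocked Read returns
	<-done
	return r.Close()
}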
|
||||
|
||||
func (w *kqueue) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
|
||||
pathsToRemove := w.watches.listPaths(false)
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *kqueue) AddWith(name string, opts ...addOpt) error {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
_, err := w.addWatch(name, noteAllEvents, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches.addUserWatch(name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Remove(name string) error {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
return w.remove(name, true)
|
||||
}
|
||||
|
||||
func (w *kqueue) remove(name string, unwatchFiles bool) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
info, ok := w.watches.byPath(name)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(info.wd)
|
||||
|
||||
isDir := w.watches.remove(info.wd, name)
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if unwatchFiles && isDir {
|
||||
pathsToRemove := w.watches.watchesInDir(name)
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error to
|
||||
// the user, as that will just confuse them with an error about a
|
||||
// path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
return w.watches.listPaths(true)
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// addWatch adds name to the watched file set; the flags are interpreted as
|
||||
// described in kevent(2).
|
||||
//
|
||||
// Returns the real path to the file which was added, with symlinks resolved.
|
||||
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
|
||||
if w.isClosed() {
|
||||
return "", ErrClosed
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
|
||||
info, alreadyWatching := w.watches.byPath(name)
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets or named pipes.
|
||||
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow symlinks, but only for paths added with Add(), and not paths
|
||||
// we're adding from internalWatch via a directory listing.
|
||||
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
link, err := os.Readlink(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !filepath.IsAbs(link) {
|
||||
link = filepath.Join(filepath.Dir(name), link)
|
||||
}
|
||||
|
||||
_, alreadyWatching = w.watches.byPath(link)
|
||||
if alreadyWatching {
|
||||
// Add to watches so we don't get spurious Create events later
|
||||
// on when we diff the directories.
|
||||
w.watches.addLink(name, 0)
|
||||
return link, nil
|
||||
}
|
||||
|
||||
info.linkName = name
|
||||
name = link
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
||||
// See #354, and Go issues 11180 and 39237.
|
||||
for {
|
||||
info.wd, err = unix.Open(name, openMode, 0)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if errors.Is(err, unix.EINTR) {
|
||||
continue
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
info.isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
|
||||
if err != nil {
|
||||
unix.Close(info.wd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.watches.add(name, info.linkName, info.wd, info.isDir)
|
||||
}
|
||||
|
||||
// Watch the directory if it has not been watched before, or if it was
|
||||
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
if info.isDir {
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
if !w.watches.updateDirFlags(name, flags) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
if watchDir {
|
||||
d := name
|
||||
if info.linkName != "" {
|
||||
d = info.linkName
|
||||
}
|
||||
if err := w.watchDirectoryFiles(d); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *kqueue) readEvents() {
|
||||
defer func() {
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
_ = unix.Close(w.kq)
|
||||
unix.Close(w.closepipe[0])
|
||||
}()
|
||||
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
for {
|
||||
kevents, err := w.read(eventBuffer)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, kevent := range kevents {
|
||||
var (
|
||||
wd = int(kevent.Ident)
|
||||
mask = uint32(kevent.Fflags)
|
||||
)
|
||||
|
||||
// Shut down the loop when the pipe is closed, but only after all
|
||||
// other events have been processed.
|
||||
if wd == w.closepipe[0] {
|
||||
return
|
||||
}
|
||||
|
||||
path, ok := w.watches.byWd(wd)
|
||||
if debug {
|
||||
internal.Debug(path.name, &kevent)
|
||||
}
|
||||
|
||||
// On macOS it seems that sometimes an event with Ident=0 is
|
||||
// delivered, and no other flags/information beyond that, even
|
||||
// though we never saw such a file descriptor. For example in
|
||||
// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
|
||||
//
|
||||
// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
|
||||
// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||
// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||
//
|
||||
// The first is a normal event, the second with Ident 0. No error
|
||||
// flag, no data, no ... nothing.
|
||||
//
|
||||
// I read a bit through bsd/kern_event.c from the xnu source, but I
|
||||
// don't really see an obvious location where this is triggered –
|
||||
// this doesn't seem intentional, but idk...
|
||||
//
|
||||
// Technically fd 0 is a valid descriptor, so only skip it if
|
||||
// there's no path, and if we're on macOS.
|
||||
if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
|
||||
continue
|
||||
}
|
||||
|
||||
event := w.newEvent(path.name, path.linkName, mask)
|
||||
|
||||
if event.Has(Rename) || event.Has(Remove) {
|
||||
w.remove(event.Name, false)
|
||||
w.watches.markSeen(event.Name, false)
|
||||
}
|
||||
|
||||
if path.isDir && event.Has(Write) && !event.Has(Remove) {
|
||||
w.dirChange(event.Name)
|
||||
} else if !w.sendEvent(event) {
|
||||
return
|
||||
}
|
||||
|
||||
if event.Has(Remove) {
|
||||
// Look for a file that may have overwritten this; for example,
|
||||
// mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
_, found := w.watches.byPath(fileDir)
|
||||
if found {
|
||||
// TODO: this branch is never triggered in any test.
|
||||
// Added in d6220df (2012).
|
||||
// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
|
||||
//
|
||||
// I don't really get how this can be triggered either.
|
||||
// And it wasn't triggered in the patch that added it,
|
||||
// either.
|
||||
//
|
||||
// Original also had a comment:
|
||||
// make sure the directory exists before we watch for
|
||||
// changes. When we do a recursive watch and perform
|
||||
// rm -rf, the parent directory might have gone
|
||||
// missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the
|
||||
// parent directory.
|
||||
err := w.dirChange(fileDir)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
path := filepath.Clean(event.Name)
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
err := w.sendCreateIfNew(path, fi)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if linkName != "" {
|
||||
// If the user watched "/path/link" then emit events as "/path/link"
|
||||
// rather than "/path/target".
|
||||
e.Name = linkName
|
||||
}
|
||||
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
// No point sending a write and delete event at the same time: if it's gone,
|
||||
// then it's gone.
|
||||
if e.Op.Has(Write) && e.Op.Has(Remove) {
|
||||
e.Op &^= Write
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// watchDirectoryFiles watches each file in a directory, to mimic inotify when adding a watch on a directory.
|
||||
func (w *kqueue) watchDirectoryFiles(dirPath string) error {
|
||||
files, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
path := filepath.Join(dirPath, f.Name())
|
||||
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
|
||||
cleanPath, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
// No permission to read the file; that's not a problem: just skip.
|
||||
// But do mark it as seen to prevent it from being picked up
|
||||
// as a "new" file later (it still shows up in the directory
|
||||
// listing).
|
||||
switch {
|
||||
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
||||
cleanPath = filepath.Clean(path)
|
||||
default:
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
w.watches.markSeen(cleanPath, true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search the directory for new files and send an event for them.
|
||||
//
|
||||
// This functionality is to have the BSD watcher match inotify, which sends
|
||||
// a create event for files created in a watched directory.
|
||||
func (w *kqueue) dirChange(dir string) error {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
// Directory no longer exists: we can ignore this safely. kqueue will
|
||||
// still give us the correct events.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
|
||||
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||
if err != nil {
|
||||
// Don't need to send an error if this file isn't readable.
|
||||
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a create event if the file isn't already being tracked, and start
|
||||
// watching this file.
|
||||
func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
|
||||
if !w.watches.seenBefore(path) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Like watchDirectoryFiles, but without doing another ReadDir.
|
||||
path, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches.markSeen(path, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
|
||||
if fi.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories, but preserve
|
||||
// the flags used if currently watching subdirectory
|
||||
info, _ := w.watches.byPath(name)
|
||||
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
|
||||
}
|
||||
|
||||
// Watch file to mimic Linux inotify.
|
||||
return w.addWatch(name, noteAllEvents, true)
|
||||
}
|
||||
|
||||
// Register events with the queue.
|
||||
func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// Register the events.
|
||||
success, err := unix.Kevent(w.kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(w.kq, nil, events, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
func (w *kqueue) xSupports(op Op) bool {
|
||||
//if runtime.GOOS == "freebsd" {
|
||||
// return true // Supports everything.
|
||||
//}
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
22
vendor/github.com/fsnotify/fsnotify/backend_other.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "errors"
|
||||
|
||||
type other struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return nil, errors.New("fsnotify not supported on the current platform")
|
||||
}
|
||||
func (w *other) Close() error { return nil }
|
||||
func (w *other) WatchList() []string { return nil }
|
||||
func (w *other) Add(name string) error { return nil }
|
||||
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
|
||||
func (w *other) Remove(name string) error { return nil }
|
||||
func (w *other) xSupports(op Op) bool { return false }
|
||||
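The stub backend above makes newBackend return an error on unsupported platforms (or App Engine) instead of a half-working watcher, so watcher construction should be treated as optional by callers. A small sketch of that fallback decision; the polling fallback is only named, not implemented here:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// newWatcherOrNil returns a watcher when the platform has a backend, and nil
// when it does not, so the caller can fall back to polling or disable reload.
func newWatcherOrNil() *fsnotify.Watcher {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Printf("fsnotify unavailable, falling back to polling: %v", err)
		return nil
	}
	return w
}

func main() {
	if w := newWatcherOrNil(); w != nil {
		defer w.Close()
	}
}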
680
vendor/github.com/fsnotify/fsnotify/backend_windows.go
generated
vendored
Normal file
@@ -0,0 +1,680 @@
|
||||
//go:build windows
|
||||
|
||||
// Windows backend based on ReadDirectoryChangesW()
|
||||
//
|
||||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type readDirChangesW struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
done chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, closed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
closed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
var defaultBufferSize = 50
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
w := &readDirChangesW{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
done: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) isClosed() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.closed
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
event := w.newEvent(name, uint32(mask))
|
||||
event.renamedFrom = renamedFrom
|
||||
select {
|
||||
case ch := <-w.done:
|
||||
w.done <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *readDirChangesW) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.closed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "done" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.done <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
if with.bufsize < 4096 {
|
||||
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
bufsize: with.bufsize,
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
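// An illustrative sketch, not part of the upstream file: AddWith above never
// touches the watch state directly; it sends an input carrying a reply channel
// to the goroutine that owns the completion port and blocks until that
// goroutine answers. The pattern in isolation, with hypothetical names:
type ownerRequest struct {
	path  string
	reply chan error
}

// owner is the single goroutine allowed to mutate the owned state, so no
// locks are needed around the actual work.
func owner(requests <-chan ownerRequest) {
	for req := range requests {
		req.reply <- nil // report success (or the real error) to the caller
	}
}

// submit sends one request and waits for the owner's answer.
func submit(requests chan<- ownerRequest, path string) error {
	req := ownerRequest{path: path, reply: make(chan error)}
	requests <- req
	return <-req.reply
}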
|
||||
|
||||
func (w *readDirChangesW) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
for name := range watchEntry.names {
|
||||
entries = append(entries, filepath.Join(watchEntry.path, name))
|
||||
}
|
||||
// the directory itself is being watched
|
||||
if watchEntry.mask != 0 {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// These options are from the old golang.org/x/exp/winfsnotify, where you could
|
||||
// add various options to the watch. This has long since been removed.
|
||||
//
|
||||
// The "sys" in the name is misleading as they're not part of any "system".
|
||||
//
|
||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||
const (
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
sysFSIGNORED = 0x8000
|
||||
)
|
||||
|
||||
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
bufsize int
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle windows.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
recurse bool // Recursive watch?
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf []byte // buffer, allocated later
|
||||
}
|
||||
|
||||
type (
|
||||
indexMap map[uint64]*watch
|
||||
watchMap map[uint32]indexMap
|
||||
)
|
||||
|
||||
func (w *readDirChangesW) wakeupReader() error {
|
||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
|
||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||
if err != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||
}
|
||||
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
|
||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
||||
windows.FILE_LIST_DIRECTORY,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
||||
nil, windows.OPEN_EXISTING,
|
||||
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", err)
|
||||
}
|
||||
|
||||
var fi windows.ByHandleFileInformation
|
||||
err = windows.GetFileInformationByHandle(h, &fi)
|
||||
if err != nil {
|
||||
windows.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
|
||||
if err != nil {
|
||||
windows.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
recurse: recurse,
|
||||
buf: make([]byte, bufsize),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
windows.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
|
||||
err = w.startRead(watchEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) remWatch(pathname string) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
|
||||
if recurse && !watch.recurse {
|
||||
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||
}
|
||||
|
||||
err = windows.CloseHandle(ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
if watch == nil {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) startRead(watch *watch) error {
|
||||
err := windows.CancelIo(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CancelIo", err))
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := w.toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= w.toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
err := windows.CloseHandle(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// We need to pass the array, rather than the slice.
|
||||
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
|
||||
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
|
||||
watch.recurse, mask, nil, &watch.ov, 0)
|
||||
if rdErr != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *readDirChangesW) readEvents() {
|
||||
var (
|
||||
n uint32
|
||||
key uintptr
|
||||
ov *windows.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
// This error is handled after the watch == nil check below.
|
||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.done:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
|
||||
err := windows.CloseHandle(w.port)
|
||||
if err != nil {
|
||||
err = os.NewSyscallError("CloseHandle", err)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch qErr {
|
||||
case nil:
|
||||
// No error
|
||||
case windows.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case windows.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case windows.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.sendError(ErrEventOverflow)
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
|
||||
// Create a buf that is the size of the path name
|
||||
size := int(raw.FileNameLength / 2)
|
||||
var buf []uint16
|
||||
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
|
||||
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
|
||||
sh.Len = size
|
||||
sh.Cap = size
|
||||
name := windows.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
if debug {
|
||||
internal.Debug(fullname, raw.Action)
|
||||
}
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
// Update saved path of all sub-watches.
|
||||
old := filepath.Join(watch.path, watch.rename)
|
||||
w.mu.Lock()
|
||||
for _, watchMap := range w.watches {
|
||||
for _, ww := range watchMap {
|
||||
if strings.HasPrefix(ww.path, old) {
|
||||
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, "", watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action == windows.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
} else {
|
||||
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
}
|
||||
|
||||
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
//lint:ignore ST1005 Windows should be capitalized
|
||||
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.sendError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case windows.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
496
vendor/github.com/fsnotify/fsnotify/fsnotify.go
generated
vendored
Normal file
@@ -0,0 +1,496 @@
|
||||
// Package fsnotify provides a cross-platform interface for file system
|
||||
// notifications.
|
||||
//
|
||||
// Currently supported systems:
|
||||
//
|
||||
// - Linux via inotify
|
||||
// - BSD, macOS via kqueue
|
||||
// - Windows via ReadDirectoryChangesW
|
||||
// - illumos via FEN
|
||||
//
|
||||
// # FSNOTIFY_DEBUG
|
||||
//
|
||||
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
|
||||
// stderr. This can be useful to track down some problems, especially in cases
|
||||
// where fsnotify is used as an indirect dependency.
|
||||
//
|
||||
// Every event will be printed as soon as there's something useful to print,
|
||||
// with as little processing from fsnotify.
|
||||
//
|
||||
// Example output:
|
||||
//
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
b backend
|
||||
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
}
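// Editor's note: an illustrative sketch, not part of the upstream fsnotify
// source. It shows a minimal consumer of the Events and Errors channels; the
// watched path "/tmp" and the log calls are placeholders.
//
//	w, err := fsnotify.NewWatcher()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer w.Close()
//	if err := w.Add("/tmp"); err != nil {
//		log.Fatal(err)
//	}
//	for {
//		select {
//		case ev, ok := <-w.Events:
//			if !ok { // channel was closed by Close()
//				return
//			}
//			log.Println("event:", ev)
//		case err, ok := <-w.Errors:
//			if !ok {
//				return
//			}
//			log.Println("error:", err)
//		}
//	}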
|
||||
|
||||
// Event represents a file system notification.
|
||||
type Event struct {
|
||||
// Path to the file or directory.
|
||||
//
|
||||
// Paths are relative to the input; for example with Add("dir") the Name
|
||||
// will be set to "dir/file" if you create that file, but if you use
|
||||
// Add("/path/to/dir") it will be "/path/to/dir/file".
|
||||
Name string
|
||||
|
||||
// File operation that triggered the event.
|
||||
//
|
||||
// This is a bitmask and some systems may send multiple operations at once.
|
||||
// Use the Event.Has() method instead of comparing with ==.
|
||||
Op Op
|
||||
|
||||
// Create events will have this set to the old path if it's a rename. This
|
||||
// only works when both the source and destination are watched. It's not
|
||||
// reliable when watching individual files, only directories.
|
||||
//
|
||||
// For example "mv /tmp/file /tmp/rename" will emit:
|
||||
//
|
||||
// Event{Op: Rename, Name: "/tmp/file"}
|
||||
// Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
|
||||
renamedFrom string
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||
// full description, and check them with [Event.Has].
|
||||
const (
|
||||
// A new pathname was created.
|
||||
Create Op = 1 << iota
|
||||
|
||||
// The pathname was written to; this does *not* mean the write has finished,
|
||||
// and a write can be followed by more writes.
|
||||
Write
|
||||
|
||||
// The path was removed; any watches on it will be removed. Some "remove"
|
||||
// operations may trigger a Rename if the file is actually moved (for
|
||||
// example "remove to trash" is often a rename).
|
||||
Remove
|
||||
|
||||
// The path was renamed to something else; any watches on it will be
|
||||
// removed.
|
||||
Rename
|
||||
|
||||
// File attributes were changed.
|
||||
//
|
||||
// It's generally not recommended to take action on this event, as it may
|
||||
// get triggered very frequently by some software. For example, Spotlight
|
||||
// indexing on macOS, anti-virus software, backup software, etc.
|
||||
Chmod
|
||||
|
||||
// File descriptor was opened.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableOpen
|
||||
|
||||
// File was read from.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableRead
|
||||
|
||||
// File opened for writing was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
//
|
||||
// The advantage of using this over Write is that it's more reliable than
|
||||
// waiting for Write events to stop. It's also faster (if you're not
|
||||
// listening to Write events): copying a file of a few GB can easily
|
||||
// generate tens of thousands of Write events in a short span of time.
|
||||
xUnportableCloseWrite
|
||||
|
||||
// File opened for reading was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableCloseRead
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNonExistentWatch is used when Remove() is called on a path that's not
|
||||
// added.
|
||||
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
|
||||
|
||||
// ErrClosed is used when trying to operate on a closed Watcher.
|
||||
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||
|
||||
// ErrEventOverflow is reported from the Errors channel when there are too
|
||||
// many events:
|
||||
//
|
||||
// - inotify: inotify returns IN_Q_OVERFLOW – because there are too
|
||||
// many queued events (the fs.inotify.max_queued_events
|
||||
// sysctl can be used to increase this).
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||
|
||||
// ErrUnsupported is returned by AddWith() when WithOps() specified an
|
||||
// Unportable event that's not supported on this platform.
|
||||
//lint:ignore ST1012 not relevant
|
||||
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
|
||||
)
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
|
||||
b, err := newBackend(ev, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
ev, errs := make(chan Event, sz), make(chan error)
|
||||
b, err := newBackend(ev, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||
}
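// Editor's note: an illustrative sketch, not upstream code. The buffer size of
// 4096 events is an arbitrary example, assuming the kernel-side buffers cannot
// be raised.
//
//	w, err := fsnotify.NewBufferedWatcher(4096) // queue up to 4096 events in userspace
//	if err != nil {
//		log.Fatal(err)
//	}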
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
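// Editor's note: an illustrative sketch, not upstream code, of the pattern
// recommended above: watch the parent directory and filter on Event.Name. The
// path "/etc/app/config.yml" and the reload() helper are hypothetical; w is a
// *Watcher from NewWatcher().
//
//	if err := w.Add("/etc/app"); err != nil {
//		log.Fatal(err)
//	}
//	for ev := range w.Events {
//		if ev.Name == "/etc/app/config.yml" && ev.Has(fsnotify.Write) {
//			reload() // hypothetical application callback
//		}
//	}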
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error { return w.b.Close() }
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// The order is undefined, and may differ per call. Returns nil if
|
||||
// [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
|
||||
|
||||
// Supports reports if all the listed operations are supported by this platform.
|
||||
//
|
||||
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
|
||||
// return false for an Op starting with Unportable.
|
||||
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
|
||||
|
||||
func (o Op) String() string {
|
||||
var b strings.Builder
|
||||
if o.Has(Create) {
|
||||
b.WriteString("|CREATE")
|
||||
}
|
||||
if o.Has(Remove) {
|
||||
b.WriteString("|REMOVE")
|
||||
}
|
||||
if o.Has(Write) {
|
||||
b.WriteString("|WRITE")
|
||||
}
|
||||
if o.Has(xUnportableOpen) {
|
||||
b.WriteString("|OPEN")
|
||||
}
|
||||
if o.Has(xUnportableRead) {
|
||||
b.WriteString("|READ")
|
||||
}
|
||||
if o.Has(xUnportableCloseWrite) {
|
||||
b.WriteString("|CLOSE_WRITE")
|
||||
}
|
||||
if o.Has(xUnportableCloseRead) {
|
||||
b.WriteString("|CLOSE_READ")
|
||||
}
|
||||
if o.Has(Rename) {
|
||||
b.WriteString("|RENAME")
|
||||
}
|
||||
if o.Has(Chmod) {
|
||||
b.WriteString("|CHMOD")
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
return "[no events]"
|
||||
}
|
||||
return b.String()[1:]
|
||||
}
|
||||
|
||||
// Has reports if this operation has the given operation.
|
||||
func (o Op) Has(h Op) bool { return o&h != 0 }
|
||||
|
||||
// Has reports if this event has the given operation.
|
||||
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
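// Editor's note: an illustrative sketch, not upstream code. Op is a bitmask, so
// check membership with Has() rather than comparing with ==; ev is assumed to
// be an Event received from Watcher.Events.
//
//	if ev.Has(fsnotify.Create) || ev.Has(fsnotify.Rename) {
//		// a path appeared, or was moved away
//	}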
|
||||
|
||||
// String returns a string representation of the event with their path.
|
||||
func (e Event) String() string {
|
||||
if e.renamedFrom != "" {
|
||||
return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
|
||||
}
|
||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||
}
|
||||
|
||||
type (
|
||||
backend interface {
|
||||
Add(string) error
|
||||
AddWith(string, ...addOpt) error
|
||||
Remove(string) error
|
||||
WatchList() []string
|
||||
Close() error
|
||||
xSupports(Op) bool
|
||||
}
|
||||
addOpt func(opt *withOpts)
|
||||
withOpts struct {
|
||||
bufsize int
|
||||
op Op
|
||||
noFollow bool
|
||||
sendCreate bool
|
||||
}
|
||||
)
|
||||
|
||||
var debug = func() bool {
|
||||
// Check for exactly "1" (rather than mere existence) so we can add
|
||||
// options/flags in the future. I don't know if we ever want that, but it's
|
||||
// nice to leave the option open.
|
||||
return os.Getenv("FSNOTIFY_DEBUG") == "1"
|
||||
}()
|
||||
|
||||
var defaultOpts = withOpts{
|
||||
bufsize: 65536, // 64K
|
||||
op: Create | Write | Remove | Rename | Chmod,
|
||||
}
|
||||
|
||||
func getOptions(opts ...addOpt) withOpts {
|
||||
with := defaultOpts
|
||||
for _, o := range opts {
|
||||
if o != nil {
|
||||
o(&with)
|
||||
}
|
||||
}
|
||||
return with
|
||||
}
|
||||
|
||||
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
|
||||
//
|
||||
// This only has effect on Windows systems, and is a no-op for other backends.
|
||||
//
|
||||
// The default value is 64K (65536 bytes) which is the highest value that works
|
||||
// on all filesystems and should be enough for most applications, but if you
|
||||
// have a large burst of events it may not be enough. You can increase it if
|
||||
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
|
||||
//
|
||||
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
func WithBufferSize(bytes int) addOpt {
|
||||
return func(opt *withOpts) { opt.bufsize = bytes }
|
||||
}
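// Editor's note: an illustrative sketch, not upstream code, assuming w is a
// *Watcher. It doubles the default 64K ReadDirectoryChangesW buffer on Windows;
// the option is a no-op on other platforms.
//
//	err := w.AddWith(`C:\some\dir`, fsnotify.WithBufferSize(128*1024))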
|
||||
|
||||
// WithOps sets which operations to listen for. The default is [Create],
|
||||
// [Write], [Remove], [Rename], and [Chmod].
|
||||
//
|
||||
// Excluding operations you're not interested in can save quite a bit of CPU
|
||||
// time; in some use cases there may be hundreds of thousands of useless Write
|
||||
// or Chmod operations per second.
|
||||
//
|
||||
// This can also be used to add unportable operations not supported by all
|
||||
// platforms; unportable operations all start with "Unportable":
|
||||
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
|
||||
// [UnportableCloseRead].
|
||||
//
|
||||
// AddWith returns an error when using an unportable operation that's not
|
||||
// supported. Use [Watcher.Support] to check for support.
|
||||
func withOps(op Op) addOpt {
|
||||
return func(opt *withOpts) { opt.op = op }
|
||||
}
|
||||
|
||||
// WithNoFollow disables following symlinks, so the symlinks themselves are
|
||||
// watched.
|
||||
func withNoFollow() addOpt {
|
||||
return func(opt *withOpts) { opt.noFollow = true }
|
||||
}
|
||||
|
||||
// "Internal" option for recursive watches on inotify.
|
||||
func withCreate() addOpt {
|
||||
return func(opt *withOpts) { opt.sendCreate = true }
|
||||
}
|
||||
|
||||
var enableRecurse = false
|
||||
|
||||
// Check if this path is recursive (ends with "/..." or "\..."), and return the
|
||||
// path with the /... stripped.
|
||||
func recursivePath(path string) (string, bool) {
|
||||
path = filepath.Clean(path)
|
||||
if !enableRecurse { // Only enabled in tests for now.
|
||||
return path, false
|
||||
}
|
||||
if filepath.Base(path) == "..." {
|
||||
return filepath.Dir(path), true
|
||||
}
|
||||
return path, false
|
||||
}
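// Editor's note: illustrative behaviour only, not upstream code. With
// enableRecurse set (tests only, for now) a trailing "/..." marks a recursive
// watch and is stripped:
//
//	recursivePath("/tmp/dir/...") // -> "/tmp/dir", true
//	recursivePath("/tmp/dir")     // -> "/tmp/dir", false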
|
||||
39
vendor/github.com/fsnotify/fsnotify/internal/darwin.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
//go:build darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSyscallEACCES = syscall.EACCES
|
||||
ErrUnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = l.Cur
|
||||
|
||||
if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
|
||||
maxfiles = uint64(n)
|
||||
}
|
||||
|
||||
if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
|
||||
maxfiles = uint64(n)
|
||||
}
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||
57
vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
|
||||
{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
|
||||
{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
|
||||
{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
|
||||
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
|
||||
{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
|
||||
{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
|
||||
{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
|
||||
{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
|
||||
{"NOTE_NONE", unix.NOTE_NONE},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_REAP", unix.NOTE_REAP},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
|
||||
{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
|
||||
{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
|
||||
{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
33
vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
42
vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CLOSE", unix.NOTE_CLOSE},
|
||||
{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OPEN", unix.NOTE_OPEN},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_READ", unix.NOTE_READ},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
32
vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, kevent *unix.Kevent_t) {
|
||||
mask := uint32(kevent.Fflags)
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||
56
vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, mask, cookie uint32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"IN_ACCESS", unix.IN_ACCESS},
|
||||
{"IN_ATTRIB", unix.IN_ATTRIB},
|
||||
{"IN_CLOSE", unix.IN_CLOSE},
|
||||
{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
|
||||
{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
|
||||
{"IN_CREATE", unix.IN_CREATE},
|
||||
{"IN_DELETE", unix.IN_DELETE},
|
||||
{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
|
||||
{"IN_IGNORED", unix.IN_IGNORED},
|
||||
{"IN_ISDIR", unix.IN_ISDIR},
|
||||
{"IN_MODIFY", unix.IN_MODIFY},
|
||||
{"IN_MOVE", unix.IN_MOVE},
|
||||
{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
|
||||
{"IN_MOVED_TO", unix.IN_MOVED_TO},
|
||||
{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
|
||||
{"IN_OPEN", unix.IN_OPEN},
|
||||
{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
|
||||
{"IN_UNMOUNT", unix.IN_UNMOUNT},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
var c string
|
||||
if cookie > 0 {
|
||||
c = fmt.Sprintf("(cookie: %d) ", cookie)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
|
||||
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
|
||||
}
|
||||
25
vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
28
vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EOF", unix.NOTE_EOF},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
45
vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, mask int32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m int32
|
||||
}{
|
||||
{"FILE_ACCESS", unix.FILE_ACCESS},
|
||||
{"FILE_MODIFIED", unix.FILE_MODIFIED},
|
||||
{"FILE_ATTRIB", unix.FILE_ATTRIB},
|
||||
{"FILE_TRUNC", unix.FILE_TRUNC},
|
||||
{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
|
||||
{"FILE_DELETE", unix.FILE_DELETE},
|
||||
{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
|
||||
{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
|
||||
{"UNMOUNTED", unix.UNMOUNTED},
|
||||
{"MOUNTEDOVER", unix.MOUNTEDOVER},
|
||||
{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||
40
vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func Debug(name string, mask uint32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
|
||||
{"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
|
||||
{"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
|
||||
{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
|
||||
{"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
|
||||
}
|
||||
31
vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
//go:build freebsd
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSyscallEACCES = syscall.EACCES
|
||||
ErrUnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = uint64(l.Cur)
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
|
||||
2
vendor/github.com/fsnotify/fsnotify/internal/internal.go
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
// Package internal contains some helpers.
|
||||
package internal
|
||||
31
vendor/github.com/fsnotify/fsnotify/internal/unix.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
//go:build !windows && !darwin && !freebsd && !plan9
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSyscallEACCES = syscall.EACCES
|
||||
ErrUnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = uint64(l.Cur)
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||
7
vendor/github.com/fsnotify/fsnotify/internal/unix2.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
//go:build !windows
|
||||
|
||||
package internal
|
||||
|
||||
func HasPrivilegesForSymlink() bool {
|
||||
return true
|
||||
}
|
||||
41
vendor/github.com/fsnotify/fsnotify/internal/windows.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
//go:build windows
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Just a dummy.
|
||||
var (
|
||||
ErrSyscallEACCES = errors.New("dummy")
|
||||
ErrUnixEACCES = errors.New("dummy")
|
||||
)
|
||||
|
||||
func SetRlimit() {}
|
||||
func Maxfiles() uint64 { return 1<<64 - 1 }
|
||||
func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
|
||||
func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
|
||||
|
||||
func HasPrivilegesForSymlink() bool {
|
||||
var sid *windows.SID
|
||||
err := windows.AllocateAndInitializeSid(
|
||||
&windows.SECURITY_NT_AUTHORITY,
|
||||
2,
|
||||
windows.SECURITY_BUILTIN_DOMAIN_RID,
|
||||
windows.DOMAIN_ALIAS_RID_ADMINS,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
&sid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer windows.FreeSid(sid)
|
||||
token := windows.Token(0)
|
||||
member, err := token.IsMember(sid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return member || token.IsElevated()
|
||||
}
|
||||
64
vendor/github.com/fsnotify/fsnotify/shared.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
package fsnotify
|
||||
|
||||
import "sync"
|
||||
|
||||
type shared struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan struct{}
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func newShared(ev chan Event, errs chan error) *shared {
|
||||
return &shared{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the event was sent, or false if the watcher is closed.
|
||||
func (w *shared) sendEvent(e Event) bool {
|
||||
if e.Op == 0 {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Events <- e:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if the watcher is closed.
|
||||
func (w *shared) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *shared) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Mark as closed; returns true if it was already closed.
|
||||
func (w *shared) close() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.isClosed() {
|
||||
return true
|
||||
}
|
||||
close(w.done)
|
||||
return false
|
||||
}
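// Editor's note: an illustrative sketch, not upstream code, of how a backend
// embedding *shared sends an event without blocking once the watcher is closed.
//
//	if !w.sendEvent(Event{Name: name, Op: Create}) {
//		return // the watcher was closed while this event was being processed
//	}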
|
||||
3
vendor/github.com/fsnotify/fsnotify/staticcheck.conf
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
checks = ['all',
|
||||
'-U1000', # Don't complain about unused functions.
|
||||
]
|
||||
7
vendor/github.com/fsnotify/fsnotify/system_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
|
||||
8
vendor/github.com/fsnotify/fsnotify/system_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// note: this constant is not defined on BSD
|
||||
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
|
||||
3
vendor/github.com/go-chi/chi/v5/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
.idea
|
||||
*.sw?
|
||||
.vscode
|
||||
341
vendor/github.com/go-chi/chi/v5/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
||||
# Changelog
|
||||
|
||||
## v5.0.12 (2024-02-16)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.11...v5.0.12
|
||||
|
||||
|
||||
## v5.0.11 (2023-12-19)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.10...v5.0.11
|
||||
|
||||
|
||||
## v5.0.10 (2023-07-13)
|
||||
|
||||
- Fixed small edge case in tests of v5.0.9 for older Go versions
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.9...v5.0.10
|
||||
|
||||
|
||||
## v5.0.9 (2023-07-13)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.8...v5.0.9
|
||||
|
||||
|
||||
## v5.0.8 (2022-12-07)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.7...v5.0.8
|
||||
|
||||
|
||||
## v5.0.7 (2021-11-18)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.6...v5.0.7
|
||||
|
||||
|
||||
## v5.0.6 (2021-11-15)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.5...v5.0.6
|
||||
|
||||
|
||||
## v5.0.5 (2021-10-27)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.4...v5.0.5
|
||||
|
||||
|
||||
## v5.0.4 (2021-08-29)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.3...v5.0.4
|
||||
|
||||
|
||||
## v5.0.3 (2021-04-29)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.2...v5.0.3
|
||||
|
||||
|
||||
## v5.0.2 (2021-03-25)
|
||||
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.1...v5.0.2
|
||||
|
||||
|
||||
## v5.0.1 (2021-03-10)
|
||||
|
||||
- Small improvements
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v5.0.0...v5.0.1
|
||||
|
||||
|
||||
## v5.0.0 (2021-02-27)
|
||||
|
||||
- chi v5, `github.com/go-chi/chi/v5` introduces the adoption of Go's SIV to adhere to the current state-of-the-tools in Go.
|
||||
- chi v1.5.x did not work out as planned, as the Go tooling is too powerful and chi's adoption is too wide.
|
||||
The most responsible thing to do for everyone's benefit is to just release v5 with SIV, so I present to you all,
|
||||
chi v5 at `github.com/go-chi/chi/v5`. I hope someday the developer experience and ergonomics I've been seeking
|
||||
will still come to fruition in some form, see https://github.com/golang/go/issues/44550
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.4...v5.0.0
|
||||
|
||||
|
||||
## v1.5.4 (2021-02-27)
|
||||
|
||||
- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4
|
||||
|
||||
|
||||
## v1.5.3 (2021-02-21)
|
||||
|
||||
- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3
|
||||
|
||||
|
||||
## v1.5.2 (2021-02-10)
|
||||
|
||||
- Reverting allocation optimization as a precaution as go test -race fails.
|
||||
- Minor improvements, see history below
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2
|
||||
|
||||
|
||||
## v1.5.1 (2020-12-06)
|
||||
|
||||
- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for
|
||||
your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README.
|
||||
- `middleware.CleanPath`: new middleware that cleans the request path of double slashes
|
||||
- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext`
|
||||
- plus other tiny improvements, see full commit history below
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1
|
||||
|
||||
|
||||
## v1.5.0 (2020-11-12) - now with go.mod support
|
||||
|
||||
`chi` dates back to 2016 with its original implementation as one of the first routers to adopt the newly introduced
|
||||
context.Context api to the stdlib -- set out to design a router that is faster, more modular and simpler than anything
|
||||
else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies,
|
||||
and in many ways is future-proofed from changes, given its minimal nature. Between versions, chi's iterations have been very
|
||||
incremental, with the architecture and api being the same today as it was originally designed in 2016. For this reason it
|
||||
makes chi a pretty easy project to maintain, thanks as well to the many amazing community contributions over the years
|
||||
from everyone who helps make chi better (total of 86 contributors to date -- thanks all!).
|
||||
|
||||
Chi has been a labour of love, art and engineering, with the goals to offer beautiful ergonomics, flexibility, performance
|
||||
and simplicity when building HTTP services with Go. I've strived to keep the router very minimal in surface area / code size,
|
||||
and always improving the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting
|
||||
middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges from
|
||||
companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of
|
||||
joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :)
|
||||
|
||||
For me, the aesthetics of chi's code and usage are very important. With the introduction of Go's module support
|
||||
(which I'm a big fan of), chi's past versioning scheme choice to v2, v3 and v4 would mean I'd require the import path
|
||||
of "github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462.
|
||||
Haha, to some, you may be scratching your head why I've spent > 1 year stalling to adopt "/vXX" convention in the import
|
||||
path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in its API design,
|
||||
aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6",
|
||||
and upgrading between versions in the future will also be just incremental.
|
||||
|
||||
I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing",
|
||||
as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and
|
||||
is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy,
|
||||
while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of
|
||||
v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's
|
||||
largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod.
|
||||
However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just
|
||||
`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains
|
||||
go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago.
|
||||
Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and
|
||||
backwards-compatible improvements/fixes will bump a "tiny" release.
|
||||
|
||||
For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`,
|
||||
which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run
|
||||
`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+
|
||||
built with go.mod support.
|
||||
|
||||
My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very
|
||||
minor request which is backwards compatible and won't break your existing installations.
|
||||
|
||||
Cheers all, happy coding!
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
||||
## v4.1.2 (2020-06-02)
|
||||
|
||||
- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
|
||||
- fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
|
||||
|
||||
|
||||
## v4.1.1 (2020-04-16)
|
||||
|
||||
- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
|
||||
route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
|
||||
- new middleware.RouteHeaders as a simple router for request headers with wildcard support
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
|
||||
|
||||
|
||||
## v4.1.0 (2020-04-1)
|
||||
|
||||
- middleware.LogEntry: Write method on interface now passes the response header
|
||||
and an extra interface type useful for custom logger implementations.
|
||||
- middleware.WrapResponseWriter: minor fix
|
||||
- middleware.Recoverer: a bit prettier
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
|
||||
|
||||
## v4.0.4 (2020-03-24)
|
||||
|
||||
- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
|
||||
- a few minor improvements and fixes
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
|
||||
|
||||
|
||||
## v4.0.3 (2020-01-09)
|
||||
|
||||
- core: fix regexp routing to include default value when param is not matched
|
||||
- middleware: rewrite of middleware.Compress
|
||||
- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
|
||||
|
||||
|
||||
## v4.0.2 (2019-02-26)
|
||||
|
||||
- Minor fixes
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
|
||||
|
||||
|
||||
## v4.0.1 (2019-01-21)
|
||||
|
||||
- Fixes issue with compress middleware: #382 #385
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
|
||||
|
||||
|
||||
## v4.0.0 (2019-01-10)
|
||||
|
||||
- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
|
||||
- router: respond with 404 on router with no routes (#362)
|
||||
- router: additional check to ensure wildcard is at the end of a url pattern (#333)
|
||||
- middleware: deprecate use of http.CloseNotifier (#347)
|
||||
- middleware: fix RedirectSlashes to include query params on redirect (#334)
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
|
||||
|
||||
|
||||
## v3.3.4 (2019-01-07)
|
||||
|
||||
- Minor middleware improvements. No changes to core library/router. Moving v3 into its
|
||||
own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
|
||||
- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
|
||||
|
||||
|
||||
## v3.3.3 (2018-08-27)
|
||||
|
||||
- Minor release
|
||||
- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
|
||||
|
||||
|
||||
## v3.3.2 (2017-12-22)
|
||||
|
||||
- Support to route trailing slashes on mounted sub-routers (#281)
|
||||
- middleware: new `ContentCharset` to check matching charsets. Thank you
|
||||
@csucu for your community contribution!
|
||||
|
||||
|
||||
## v3.3.1 (2017-11-20)
|
||||
|
||||
- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
|
||||
- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
|
||||
- Minor bug fixes
|
||||
|
||||
|
||||
## v3.3.0 (2017-10-10)
|
||||
|
||||
- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
|
||||
- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
|
||||
|
||||
|
||||
## v3.2.1 (2017-08-31)
|
||||
|
||||
- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
|
||||
and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
|
||||
- Add new `RouteMethod` to `*Context`
|
||||
- Add new `Routes` pointer to `*Context`
|
||||
- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
|
||||
- Updated benchmarks (see README)
|
||||
|
||||
|
||||
## v3.1.5 (2017-08-02)
|
||||
|
||||
- Set up golint and go vet for the project
|
||||
- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
|
||||
to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
|
||||
|
||||
|
||||
## v3.1.0 (2017-07-10)
|
||||
|
||||
- Fix a few minor issues after v3 release
|
||||
- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
|
||||
- Move `render` sub-pkg to https://github.com/go-chi/render
|
||||
- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
|
||||
suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
|
||||
https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
|
||||
|
||||
|
||||
## v3.0.0 (2017-06-21)
|
||||
|
||||
- Major update to the chi library with many exciting improvements, but also some *breaking changes*
|
||||
- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
|
||||
`/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
|
||||
same router
|
||||
- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
|
||||
`r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
|
||||
- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
|
||||
`r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
|
||||
in `_examples/custom-handler`
|
||||
- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
|
||||
own file handler using the stdlib; see `_examples/fileserver` for an example
|
||||
- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
|
||||
- Moved the chi project to its own organization, to allow chi-related community packages to
|
||||
be easily discovered and supported, at: https://github.com/go-chi
|
||||
- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
|
||||
- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
|
||||
|
||||
|
||||
## v2.1.0 (2017-03-30)
|
||||
|
||||
- Minor improvements and updates to the chi core library
|
||||
- Introduced a brand new `chi/render` sub-package to complete the story of building
|
||||
APIs, offering a pattern for managing well-defined request / response payloads. Please
|
||||
check out the updated `_examples/rest` example for how it works.
|
||||
- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
|
||||
|
||||
|
||||
## v2.0.0 (2017-01-06)
|
||||
|
||||
- After many months of v2 being in an RC state with many companies and users running it in
|
||||
production, and the inclusion of some improvements to the middlewares, we are very pleased to
|
||||
announce v2.0.0 of chi.
|
||||
|
||||
|
||||
## v2.0.0-rc1 (2016-07-26)
|
||||
|
||||
- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
|
||||
community `"net/context"` package has been included in the standard library as `"context"` and
|
||||
utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
|
||||
request-scoped values. We're very excited about the new context addition and are proud to
|
||||
introduce chi v2, a minimal and powerful routing package for building large HTTP services,
|
||||
with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
|
||||
stdlib HTTP handlers and middlewares.
|
||||
- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
|
||||
- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
|
||||
- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
|
||||
which provides direct access to URL routing parameters, the routing path and the matching
|
||||
routing patterns.
|
||||
- Users upgrading from chi v1 to v2 need to:
|
||||
1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
|
||||
the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
|
||||
2. Use `chi.URLParam(r *http.Request, paramKey string) string`
|
||||
or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
|
||||
|
||||
|
||||
## v1.0.0 (2016-07-01)
|
||||
|
||||
- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
|
||||
|
||||
|
||||
## v0.9.0 (2016-03-31)
|
||||
|
||||
- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
|
||||
- BREAKING NOTE: due to subtle API changes, the previous `chi.URLParams(ctx)["id"]` form used to access url parameters
|
||||
has changed to: `chi.URLParam(ctx, "id")`
|
||||
31
vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
# Contributing
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. [Install Go][go-install].
|
||||
2. Download the sources and switch the working directory:
|
||||
|
||||
```bash
|
||||
go get -u -d github.com/go-chi/chi
|
||||
cd $GOPATH/src/github.com/go-chi/chi
|
||||
```
|
||||
|
||||
## Submitting a Pull Request
|
||||
|
||||
A typical workflow is:
|
||||
|
||||
1. [Fork the repository.][fork]
|
||||
2. [Create a topic branch.][branch]
|
||||
3. Add tests for your change.
|
||||
4. Run `go test`. If your new tests pass at this point, return to step 3 (they should fail before the change is implemented).
|
||||
5. Implement the change and ensure the tests added in step 3 now pass.
|
||||
6. Run `goimports -w .` to ensure the new code conforms to the Go formatting guidelines.
|
||||
7. [Add, commit and push your changes.][git-help]
|
||||
8. [Submit a pull request.][pull-req]
|
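For illustration, here is a minimal command-line sketch of steps 2-7 above (an editor's note, not part of the upstream guide); the branch name `fix/route-pattern` and the commit message are placeholders:

```bash
git checkout -b fix/route-pattern   # step 2: create a topic branch
# ... add tests, then implement the change (steps 3-5) ...
go test ./...                       # the tests should now pass
goimports -w .                      # step 6: format the new code
git add .
git commit -m "Fix route pattern handling"
git push origin fix/route-pattern   # step 7: push to your fork
```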
||||
|
||||
[go-install]: https://golang.org/doc/install
|
||||
[fork]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo
|
||||
[branch]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches
|
||||
[git-help]: https://docs.github.com/en
|
||||
[pull-req]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests
|
||||
|
||||
20
vendor/github.com/go-chi/chi/v5/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
22
vendor/github.com/go-chi/chi/v5/Makefile
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
.PHONY: all
|
||||
all:
|
||||
@echo "**********************************************************"
|
||||
@echo "** chi build tool **"
|
||||
@echo "**********************************************************"
|
||||
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware
|
||||
|
||||
.PHONY: test-router
|
||||
test-router:
|
||||
go test -race -v .
|
||||
|
||||
.PHONY: test-middleware
|
||||
test-middleware:
|
||||
go test -race -v ./middleware
|
||||
|
||||
.PHONY: docs
|
||||
docs:
|
||||
npx docsify-cli serve ./docs
|
||||
505
vendor/github.com/go-chi/chi/v5/README.md
generated
vendored
Normal file
@@ -0,0 +1,505 @@
|
||||
# <img alt="chi" src="https://cdn.rawgit.com/go-chi/chi/master/_examples/chi.svg" width="220" />
|
||||
|
||||
|
||||
[![GoDoc Widget]][GoDoc]
|
||||
|
||||
`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
|
||||
especially good at helping you write large REST API services that are kept maintainable as your
|
||||
project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
|
||||
handle signaling, cancelation and request-scoped values across a handler chain.
|
||||
|
||||
The focus of the project has been to seek out an elegant and comfortable design for writing
|
||||
REST API servers, written during the development of the Pressly API service that powers our
|
||||
public API service, which in turn powers all of our client-side applications.
|
||||
|
||||
The key considerations of chi's design are: project structure, maintainability, standard http
|
||||
handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
|
||||
parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
|
||||
included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render)
|
||||
and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
go get -u github.com/go-chi/chi/v5
|
||||
```
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
* **Lightweight** - cloc'd in ~1000 LOC for the chi router
|
||||
* **Fast** - yes, see [benchmarks](#benchmarks)
|
||||
* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
|
||||
* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting
|
||||
* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
|
||||
* **Robust** - in production at Pressly, Cloudflare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
|
||||
* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
|
||||
* **Go.mod support** - as of v5, go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md))
|
||||
* **No external dependencies** - plain ol' Go stdlib + net/http
|
||||
|
||||
|
||||
## Examples
|
||||
|
||||
See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
|
||||
|
||||
|
||||
**As easy as:**
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
)
|
||||
|
||||
func main() {
|
||||
r := chi.NewRouter()
|
||||
r.Use(middleware.Logger)
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("welcome"))
|
||||
})
|
||||
http.ListenAndServe(":3000", r)
|
||||
}
|
||||
```
|
||||
|
||||
**REST Preview:**
|
||||
|
||||
Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
|
||||
in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
|
||||
Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
|
||||
|
||||
I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
|
||||
above; they will show you all the features of chi and serve as a good form of documentation.
|
||||
|
||||
```go
|
||||
import (
|
||||
//...
|
||||
"context"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
)
|
||||
|
||||
func main() {
|
||||
r := chi.NewRouter()
|
||||
|
||||
// A good base middleware stack
|
||||
r.Use(middleware.RequestID)
|
||||
r.Use(middleware.RealIP)
|
||||
r.Use(middleware.Logger)
|
||||
r.Use(middleware.Recoverer)
|
||||
|
||||
// Set a timeout value on the request context (ctx), that will signal
|
||||
// through ctx.Done() that the request has timed out and further
|
||||
// processing should be stopped.
|
||||
r.Use(middleware.Timeout(60 * time.Second))
|
||||
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("hi"))
|
||||
})
|
||||
|
||||
// RESTy routes for "articles" resource
|
||||
r.Route("/articles", func(r chi.Router) {
|
||||
r.With(paginate).Get("/", listArticles) // GET /articles
|
||||
r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
|
||||
|
||||
r.Post("/", createArticle) // POST /articles
|
||||
r.Get("/search", searchArticles) // GET /articles/search
|
||||
|
||||
// Regexp url parameters:
|
||||
r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
|
||||
|
||||
// Subrouters:
|
||||
r.Route("/{articleID}", func(r chi.Router) {
|
||||
r.Use(ArticleCtx)
|
||||
r.Get("/", getArticle) // GET /articles/123
|
||||
r.Put("/", updateArticle) // PUT /articles/123
|
||||
r.Delete("/", deleteArticle) // DELETE /articles/123
|
||||
})
|
||||
})
|
||||
|
||||
// Mount the admin sub-router
|
||||
r.Mount("/admin", adminRouter())
|
||||
|
||||
http.ListenAndServe(":3333", r)
|
||||
}
|
||||
|
||||
func ArticleCtx(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
articleID := chi.URLParam(r, "articleID")
|
||||
article, err := dbGetArticle(articleID)
|
||||
if err != nil {
|
||||
http.Error(w, http.StatusText(404), 404)
|
||||
return
|
||||
}
|
||||
ctx := context.WithValue(r.Context(), "article", article)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func getArticle(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
article, ok := ctx.Value("article").(*Article)
|
||||
if !ok {
|
||||
http.Error(w, http.StatusText(422), 422)
|
||||
return
|
||||
}
|
||||
w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
|
||||
}
|
||||
|
||||
// A completely separate router for administrator routes
|
||||
func adminRouter() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Use(AdminOnly)
|
||||
r.Get("/", adminIndex)
|
||||
r.Get("/accounts", adminListAccounts)
|
||||
return r
|
||||
}
|
||||
|
||||
func AdminOnly(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
perm, ok := ctx.Value("acl.permission").(YourPermissionType)
|
||||
if !ok || !perm.IsAdmin() {
|
||||
http.Error(w, http.StatusText(403), 403)
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Router interface
|
||||
|
||||
chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
|
||||
The router is fully compatible with `net/http`.
|
||||
|
||||
Built on top of the tree is the `Router` interface:
|
||||
|
||||
```go
|
||||
// Router consisting of the core routing methods used by chi's Mux,
|
||||
// using only the standard net/http.
|
||||
type Router interface {
|
||||
http.Handler
|
||||
Routes
|
||||
|
||||
// Use appends one or more middlewares onto the Router stack.
|
||||
Use(middlewares ...func(http.Handler) http.Handler)
|
||||
|
||||
// With adds inline middlewares for an endpoint handler.
|
||||
With(middlewares ...func(http.Handler) http.Handler) Router
|
||||
|
||||
// Group adds a new inline-Router along the current routing
|
||||
// path, with a fresh middleware stack for the inline-Router.
|
||||
Group(fn func(r Router)) Router
|
||||
|
||||
// Route mounts a sub-Router along a `pattern` string.
|
||||
Route(pattern string, fn func(r Router)) Router
|
||||
|
||||
// Mount attaches another http.Handler along ./pattern/*
|
||||
Mount(pattern string, h http.Handler)
|
||||
|
||||
	// Handle and HandleFunc add routes for `pattern` that match
|
||||
// all HTTP methods.
|
||||
Handle(pattern string, h http.Handler)
|
||||
HandleFunc(pattern string, h http.HandlerFunc)
|
||||
|
||||
	// Method and MethodFunc add routes for `pattern` that match
|
||||
// the `method` HTTP method.
|
||||
Method(method, pattern string, h http.Handler)
|
||||
MethodFunc(method, pattern string, h http.HandlerFunc)
|
||||
|
||||
// HTTP-method routing along `pattern`
|
||||
Connect(pattern string, h http.HandlerFunc)
|
||||
Delete(pattern string, h http.HandlerFunc)
|
||||
Get(pattern string, h http.HandlerFunc)
|
||||
Head(pattern string, h http.HandlerFunc)
|
||||
Options(pattern string, h http.HandlerFunc)
|
||||
Patch(pattern string, h http.HandlerFunc)
|
||||
Post(pattern string, h http.HandlerFunc)
|
||||
Put(pattern string, h http.HandlerFunc)
|
||||
Trace(pattern string, h http.HandlerFunc)
|
||||
|
||||
// NotFound defines a handler to respond whenever a route could
|
||||
// not be found.
|
||||
NotFound(h http.HandlerFunc)
|
||||
|
||||
// MethodNotAllowed defines a handler to respond whenever a method is
|
||||
// not allowed.
|
||||
MethodNotAllowed(h http.HandlerFunc)
|
||||
}
|
||||
|
||||
// Routes interface adds methods for router traversal, which is also
|
||||
// used by the github.com/go-chi/docgen package to generate documentation for Routers.
|
||||
type Routes interface {
|
||||
// Routes returns the routing tree in an easily traversable structure.
|
||||
Routes() []Route
|
||||
|
||||
// Middlewares returns the list of middlewares in use by the router.
|
||||
Middlewares() Middlewares
|
||||
|
||||
// Match searches the routing tree for a handler that matches
|
||||
// the method/path - similar to routing a http request, but without
|
||||
// executing the handler thereafter.
|
||||
Match(rctx *Context, method, path string) bool
|
||||
}
|
||||
```
|
||||
|
||||
Each routing method accepts a URL `pattern` and a chain of `handlers`. The URL pattern
|
||||
supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
|
||||
can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
|
||||
and `chi.URLParam(r, "*")` for a wildcard parameter.
|
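As a quick illustration (a minimal sketch, not taken from the examples above), the handler below reads back both a named parameter and the wildcard remainder; the route itself is hypothetical:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()

	// Named param {userID} plus a trailing wildcard "*".
	r.Get("/users/{userID}/files/*", func(w http.ResponseWriter, r *http.Request) {
		userID := chi.URLParam(r, "userID") // "123" for /users/123/files/docs/a.txt
		rest := chi.URLParam(r, "*")        // "docs/a.txt" for the same request
		fmt.Fprintf(w, "user=%s file=%s", userID, rest)
	})

	http.ListenAndServe(":3000", r)
}
```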
||||
|
||||
|
||||
### Middleware handlers
|
||||
|
||||
chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
|
||||
about them, which means the router and all the tooling are designed to be compatible and
|
||||
friendly with any middleware in the community. This offers much better extensibility and reuse
|
||||
of packages and is at the heart of chi's purpose.
|
||||
|
||||
Here is an example of a standard net/http middleware where we assign a context key `"user"`
|
||||
the value of `"123"`. This middleware sets a hypothetical user identifier on the request
|
||||
context and calls the next handler in the chain.
|
||||
|
||||
```go
|
||||
// HTTP middleware setting a value on the request context
|
||||
func MyMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// create new context from `r` request context, and assign key `"user"`
|
||||
// to value of `"123"`
|
||||
ctx := context.WithValue(r.Context(), "user", "123")
|
||||
|
||||
// call the next handler in the chain, passing the response writer and
|
||||
// the updated request object with the new context value.
|
||||
//
|
||||
// note: context.Context values are nested, so any previously set
|
||||
// values will be accessible as well, and the new `"user"` key
|
||||
// will be accessible from this point forward.
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Request handlers
|
||||
|
||||
chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
|
||||
func that reads a user identifier from the request context - hypothetically, identifying
|
||||
the user sending an authenticated request, validated+set by a previous middleware handler.
|
||||
|
||||
```go
|
||||
// HTTP handler accessing data from the request context.
|
||||
func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// here we read from the request context and fetch out `"user"` key set in
|
||||
// the MyMiddleware example above.
|
||||
user := r.Context().Value("user").(string)
|
||||
|
||||
// respond to the client
|
||||
w.Write([]byte(fmt.Sprintf("hi %s", user)))
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### URL parameters
|
||||
|
||||
chi's router parses and stores URL parameters right onto the request context. Here is
|
||||
an example of how to access URL params in your net/http handlers. And of course, middlewares
|
||||
are able to access the same information.
|
||||
|
||||
```go
|
||||
// HTTP handler accessing the url routing parameters.
|
||||
func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// fetch the url parameter `"userID"` from the request of a matching
|
||||
// routing pattern. An example routing pattern could be: /users/{userID}
|
||||
userID := chi.URLParam(r, "userID")
|
||||
|
||||
// fetch `"key"` from the request context
|
||||
ctx := r.Context()
|
||||
key := ctx.Value("key").(string)
|
||||
|
||||
// respond to the client
|
||||
w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Middlewares
|
||||
|
||||
chi comes equipped with an optional `middleware` package, providing a suite of standard
|
||||
`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
|
||||
with `net/http` can be used with chi's mux.
|
||||
|
||||
### Core middlewares
|
||||
|
||||
----------------------------------------------------------------------------------------------------
|
||||
| chi/middleware Handler | description |
|
||||
| :--------------------- | :---------------------------------------------------------------------- |
|
||||
| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers |
|
||||
| [AllowContentType] | Explicit whitelist of accepted request Content-Types |
|
||||
| [BasicAuth] | Basic HTTP authentication |
|
||||
| [Compress] | Gzip compression for clients that accept compressed responses |
|
||||
| [ContentCharset] | Ensure charset for Content-Type request headers |
|
||||
| [CleanPath] | Clean double slashes from request path |
|
||||
| [GetHead] | Automatically route undefined HEAD requests to GET handlers |
|
||||
| [Heartbeat]            | Monitoring endpoint to check the server's pulse                          |
|
||||
| [Logger] | Logs the start and end of each request with the elapsed processing time |
|
||||
| [NoCache] | Sets response headers to prevent clients from caching |
|
||||
| [Profiler] | Easily attach net/http/pprof to your routers |
|
||||
| [RealIP] | Sets a http.Request's RemoteAddr to either X-Real-IP or X-Forwarded-For |
|
||||
| [Recoverer]            | Gracefully absorbs panics and prints the stack trace                     |
|
||||
| [RequestID] | Injects a request ID into the context of each request |
|
||||
| [RedirectSlashes] | Redirect slashes on routing paths |
|
||||
| [RouteHeaders] | Route handling for request headers |
|
||||
| [SetHeader] | Short-hand middleware to set a response header key/value |
|
||||
| [StripSlashes] | Strip slashes on routing paths |
|
||||
| [Sunset]               | Sets Deprecation/Sunset headers on the response                          |
|
||||
| [Throttle] | Puts a ceiling on the number of concurrent requests |
|
||||
| [Timeout] | Signals to the request context when the timeout deadline is reached |
|
||||
| [URLFormat] | Parse extension from url and put it on request context |
|
||||
| [WithValue] | Short-hand middleware to set a key/value on the request context |
|
||||
----------------------------------------------------------------------------------------------------
|
||||
|
||||
[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding
|
||||
[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType
|
||||
[BasicAuth]: https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth
|
||||
[Compress]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress
|
||||
[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset
|
||||
[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath
|
||||
[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead
|
||||
[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID
|
||||
[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat
|
||||
[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger
|
||||
[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache
|
||||
[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler
|
||||
[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP
|
||||
[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer
|
||||
[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes
|
||||
[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger
|
||||
[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID
|
||||
[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders
|
||||
[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader
|
||||
[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes
|
||||
[Sunset]: https://pkg.go.dev/github.com/go-chi/chi/v5/middleware#Sunset
|
||||
[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle
|
||||
[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog
|
||||
[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts
|
||||
[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout
|
||||
[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat
|
||||
[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry
|
||||
[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue
|
||||
[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor
|
||||
[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter
|
||||
[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc
|
||||
[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute
|
||||
[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter
|
||||
[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry
|
||||
[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter
|
||||
[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface
|
||||
[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts
|
||||
[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter
|
||||
|
||||
### Extra middlewares & packages
|
||||
|
||||
Please see https://github.com/go-chi for additional packages.
|
||||
|
||||
--------------------------------------------------------------------------------------------------------------------
|
||||
| package | description |
|
||||
|:---------------------------------------------------|:-------------------------------------------------------------
|
||||
| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
|
||||
| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
|
||||
| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
|
||||
| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
|
||||
| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
|
||||
| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
|
||||
| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
|
||||
| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
|
||||
| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
|
||||
--------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
## context?
|
||||
|
||||
`context` is a tiny pkg that provides a simple interface to signal context across call stacks
|
||||
and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
|
||||
and is available in stdlib since go1.7.
|
||||
|
||||
Learn more at https://blog.golang.org/context
|
||||
|
||||
and..
|
||||
* Docs: https://golang.org/pkg/context
|
||||
* Source: https://github.com/golang/go/tree/master/src/context
|
||||
|
||||
|
||||
## Benchmarks
|
||||
|
||||
The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
|
||||
|
||||
Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x
|
||||
|
||||
```shell
|
||||
BenchmarkChi_Param 3075895 384 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_Param5 2116603 566 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_Param20 964117 1227 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_ParamWrite 2863413 420 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_GithubStatic 3045488 395 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_GithubParam 2204115 540 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_GithubAll 10000 113811 ns/op 81203 B/op 406 allocs/op
|
||||
BenchmarkChi_GPlusStatic 3337485 359 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_GPlusParam 2825853 423 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_GPlus2Params 2471697 483 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_GPlusAll 194220 5950 ns/op 5200 B/op 26 allocs/op
|
||||
BenchmarkChi_ParseStatic 3365324 356 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_ParseParam 2976614 404 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_Parse2Params 2638084 439 ns/op 400 B/op 2 allocs/op
|
||||
BenchmarkChi_ParseAll 109567 11295 ns/op 10400 B/op 52 allocs/op
|
||||
BenchmarkChi_StaticAll 16846 71308 ns/op 62802 B/op 314 allocs/op
|
||||
```
|
||||
|
||||
Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
|
||||
|
||||
NOTE: the allocs in the benchmark above are from the calls to http.Request's
|
||||
`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
|
||||
on the duplicated (alloc'd) request and returns the new request object. This is just
|
||||
how setting context on a request in Go works.
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
* Carl Jackson for https://github.com/zenazn/goji
|
||||
* Parts of chi's thinking comes from goji, and chi's middleware package
|
||||
sources from [goji](https://github.com/zenazn/goji/tree/master/web/middleware).
|
||||
* Please see goji's [LICENSE](https://github.com/zenazn/goji/blob/master/LICENSE) (MIT)
|
||||
* Armon Dadgar for https://github.com/armon/go-radix
|
||||
* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
|
||||
|
||||
We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
|
||||
|
||||
|
||||
## Beyond REST
|
||||
|
||||
chi is just an HTTP router that lets you decompose request handling into many smaller layers.
|
||||
Many companies use chi to write REST services for their public APIs. But, REST is just a convention
|
||||
for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server
|
||||
system or network of microservices.
|
||||
|
||||
Looking beyond REST, I also recommend some newer works in the field:
|
||||
* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
|
||||
* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
|
||||
* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
|
||||
* [NATS](https://nats.io) - lightweight pub-sub
|
||||
|
||||
|
||||
## License
|
||||
|
||||
Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
|
||||
|
||||
Licensed under [MIT License](./LICENSE)
|
||||
|
||||
[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi/v5
|
||||
[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
|
||||
[Travis]: https://travis-ci.org/go-chi/chi
|
||||
[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
|
||||
5
vendor/github.com/go-chi/chi/v5/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# Reporting Security Issues
|
||||
|
||||
We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
|
||||
|
||||
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/go-chi/chi/security/advisories/new) tab.
|
||||
49
vendor/github.com/go-chi/chi/v5/chain.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package chi
|
||||
|
||||
import "net/http"
|
||||
|
||||
// Chain returns a Middlewares type from a slice of middleware handlers.
|
||||
func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
|
||||
return Middlewares(middlewares)
|
||||
}
|
||||
|
||||
// Handler builds and returns a http.Handler from the chain of middlewares,
|
||||
// with `h http.Handler` as the final handler.
|
||||
func (mws Middlewares) Handler(h http.Handler) http.Handler {
|
||||
return &ChainHandler{h, chain(mws, h), mws}
|
||||
}
|
||||
|
||||
// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
|
||||
// with `h http.Handler` as the final handler.
|
||||
func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
|
||||
return &ChainHandler{h, chain(mws, h), mws}
|
||||
}
|
||||
|
||||
// ChainHandler is a http.Handler with support for handler composition and
|
||||
// execution.
|
||||
type ChainHandler struct {
|
||||
Endpoint http.Handler
|
||||
chain http.Handler
|
||||
Middlewares Middlewares
|
||||
}
|
||||
|
||||
func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
c.chain.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// chain builds a http.Handler composed of an inline middleware stack and endpoint
|
||||
// handler in the order they are passed.
|
||||
func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
|
||||
// Return ahead of time if there aren't any middlewares for the chain
|
||||
if len(middlewares) == 0 {
|
||||
return endpoint
|
||||
}
|
||||
|
||||
// Wrap the end handler with the middleware chain
|
||||
h := middlewares[len(middlewares)-1](endpoint)
|
||||
for i := len(middlewares) - 2; i >= 0; i-- {
|
||||
h = middlewares[i](h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
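Editor's note, not part of the vendored file: `Chain` and `Middlewares.Handler` above can also compose a middleware stack around a plain `http.Handler`, outside of any router. A minimal sketch; the `setHeader` middleware is hypothetical:

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

// setHeader is a hypothetical middleware used only for this sketch.
func setHeader(key, value string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set(key, value)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Compose the chain once; middlewares run in the order they are passed.
	h := chi.Chain(setHeader("X-A", "1"), setHeader("X-B", "2")).Handler(hello)

	http.ListenAndServe(":3000", h)
}
```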
||||
137
vendor/github.com/go-chi/chi/v5/chi.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
|
||||
// Package chi is a small, idiomatic and composable router for building HTTP services.
|
||||
//
|
||||
// chi requires Go 1.14 or newer.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/go-chi/chi/v5"
|
||||
// "github.com/go-chi/chi/v5/middleware"
|
||||
// )
|
||||
//
|
||||
// func main() {
|
||||
// r := chi.NewRouter()
|
||||
// r.Use(middleware.Logger)
|
||||
// r.Use(middleware.Recoverer)
|
||||
//
|
||||
// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
// w.Write([]byte("root."))
|
||||
// })
|
||||
//
|
||||
// http.ListenAndServe(":3333", r)
|
||||
// }
|
||||
//
|
||||
// See github.com/go-chi/chi/_examples/ for more in-depth examples.
|
||||
//
|
||||
// URL patterns allow for easy matching of path components in HTTP
|
||||
// requests. The matching components can then be accessed using
|
||||
// chi.URLParam(). All patterns must begin with a slash.
|
||||
//
|
||||
// A simple named placeholder {name} matches any sequence of characters
|
||||
// up to the next / or the end of the URL. Trailing slashes on paths must
|
||||
// be handled explicitly.
|
||||
//
|
||||
// A placeholder with a name followed by a colon allows a regular
|
||||
// expression match, for example {number:\\d+}. The regular expression
|
||||
// syntax is Go's normal regexp RE2 syntax, except that / will never be
|
||||
// matched. An anonymous regexp pattern is allowed, using an empty string
|
||||
// before the colon in the placeholder, such as {:\\d+}
|
||||
//
|
||||
// The special placeholder of asterisk matches the rest of the requested
|
||||
// URL. Any trailing characters in the pattern are ignored. This is the only
|
||||
// placeholder which will match / characters.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
|
||||
// "/user/{name}/info" matches "/user/jsmith/info"
|
||||
// "/page/*" matches "/page/intro/latest"
|
||||
// "/page/{other}/latest" also matches "/page/intro/latest"
|
||||
// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
|
||||
package chi
|
||||
|
||||
import "net/http"
|
||||
|
||||
// NewRouter returns a new Mux object that implements the Router interface.
|
||||
func NewRouter() *Mux {
|
||||
return NewMux()
|
||||
}
|
||||
|
||||
// Router consisting of the core routing methods used by chi's Mux,
|
||||
// using only the standard net/http.
|
||||
type Router interface {
|
||||
http.Handler
|
||||
Routes
|
||||
|
||||
// Use appends one or more middlewares onto the Router stack.
|
||||
Use(middlewares ...func(http.Handler) http.Handler)
|
||||
|
||||
// With adds inline middlewares for an endpoint handler.
|
||||
With(middlewares ...func(http.Handler) http.Handler) Router
|
||||
|
||||
// Group adds a new inline-Router along the current routing
|
||||
// path, with a fresh middleware stack for the inline-Router.
|
||||
Group(fn func(r Router)) Router
|
||||
|
||||
	// Route mounts a sub-Router along a `pattern` string.
|
||||
Route(pattern string, fn func(r Router)) Router
|
||||
|
||||
// Mount attaches another http.Handler along ./pattern/*
|
||||
Mount(pattern string, h http.Handler)
|
||||
|
||||
	// Handle and HandleFunc add routes for `pattern` that match
|
||||
// all HTTP methods.
|
||||
Handle(pattern string, h http.Handler)
|
||||
HandleFunc(pattern string, h http.HandlerFunc)
|
||||
|
||||
	// Method and MethodFunc add routes for `pattern` that match
|
||||
// the `method` HTTP method.
|
||||
Method(method, pattern string, h http.Handler)
|
||||
MethodFunc(method, pattern string, h http.HandlerFunc)
|
||||
|
||||
// HTTP-method routing along `pattern`
|
||||
Connect(pattern string, h http.HandlerFunc)
|
||||
Delete(pattern string, h http.HandlerFunc)
|
||||
Get(pattern string, h http.HandlerFunc)
|
||||
Head(pattern string, h http.HandlerFunc)
|
||||
Options(pattern string, h http.HandlerFunc)
|
||||
Patch(pattern string, h http.HandlerFunc)
|
||||
Post(pattern string, h http.HandlerFunc)
|
||||
Put(pattern string, h http.HandlerFunc)
|
||||
Trace(pattern string, h http.HandlerFunc)
|
||||
|
||||
// NotFound defines a handler to respond whenever a route could
|
||||
// not be found.
|
||||
NotFound(h http.HandlerFunc)
|
||||
|
||||
// MethodNotAllowed defines a handler to respond whenever a method is
|
||||
// not allowed.
|
||||
MethodNotAllowed(h http.HandlerFunc)
|
||||
}
|
||||
|
||||
// Routes interface adds methods for router traversal, which is also
|
||||
// used by the `docgen` subpackage to generate documentation for Routers.
|
||||
type Routes interface {
|
||||
// Routes returns the routing tree in an easily traversable structure.
|
||||
Routes() []Route
|
||||
|
||||
// Middlewares returns the list of middlewares in use by the router.
|
||||
Middlewares() Middlewares
|
||||
|
||||
// Match searches the routing tree for a handler that matches
|
||||
// the method/path - similar to routing a http request, but without
|
||||
// executing the handler thereafter.
|
||||
Match(rctx *Context, method, path string) bool
|
||||
|
||||
// Find searches the routing tree for the pattern that matches
|
||||
// the method/path.
|
||||
Find(rctx *Context, method, path string) string
|
||||
}
|
||||
|
||||
// Middlewares type is a slice of standard middleware handlers with methods
|
||||
// to compose middleware chains and http.Handler's.
|
||||
type Middlewares []func(http.Handler) http.Handler
|
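Editor's note, not part of the vendored file: a minimal sketch of the regexp placeholder syntax described in the package comment above, using the same date pattern:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()

	// Each segment is constrained by an RE2 regexp after the colon.
	r.Get(`/date/{yyyy:\d\d\d\d}/{mm:\d\d}/{dd:\d\d}`, func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "%s-%s-%s",
			chi.URLParam(r, "yyyy"), chi.URLParam(r, "mm"), chi.URLParam(r, "dd"))
	})

	http.ListenAndServe(":3000", r)
}
```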
||||
165
vendor/github.com/go-chi/chi/v5/context.go
generated
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
package chi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// URLParam returns the url parameter from a http.Request object.
|
||||
func URLParam(r *http.Request, key string) string {
|
||||
if rctx := RouteContext(r.Context()); rctx != nil {
|
||||
return rctx.URLParam(key)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// URLParamFromCtx returns the url parameter from a http.Request Context.
|
||||
func URLParamFromCtx(ctx context.Context, key string) string {
|
||||
if rctx := RouteContext(ctx); rctx != nil {
|
||||
return rctx.URLParam(key)
|
||||
}
|
||||
return ""
|
||||
}
|
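Editor's note, not part of the vendored file: `URLParamFromCtx` is handy when a function only receives a `context.Context` rather than the `*http.Request`. A minimal sketch; `logParam` is a hypothetical helper:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
)

// logParam only receives a context, e.g. deep inside business logic,
// yet can still read the routed URL parameter.
func logParam(ctx context.Context) {
	fmt.Println("userID:", chi.URLParamFromCtx(ctx, "userID"))
}

func main() {
	r := chi.NewRouter()
	r.Get("/users/{userID}", func(w http.ResponseWriter, r *http.Request) {
		logParam(r.Context())
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":3000", r)
}
```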
||||
|
||||
// RouteContext returns chi's routing Context object from a
|
||||
// http.Request Context.
|
||||
func RouteContext(ctx context.Context) *Context {
|
||||
val, _ := ctx.Value(RouteCtxKey).(*Context)
|
||||
return val
|
||||
}
|
||||
|
||||
// NewRouteContext returns a new routing Context object.
|
||||
func NewRouteContext() *Context {
|
||||
return &Context{}
|
||||
}
|
||||
|
||||
var (
|
||||
// RouteCtxKey is the context.Context key to store the request context.
|
||||
RouteCtxKey = &contextKey{"RouteContext"}
|
||||
)
|
||||
|
||||
// Context is the default routing context set on the root node of a
|
||||
// request context to track route patterns, URL parameters and
|
||||
// an optional routing path.
|
||||
type Context struct {
|
||||
Routes Routes
|
||||
|
||||
// parentCtx is the parent of this one, for using Context as a
|
||||
// context.Context directly. This is an optimization that saves
|
||||
// 1 allocation.
|
||||
parentCtx context.Context
|
||||
|
||||
// Routing path/method override used during the route search.
|
||||
// See Mux#routeHTTP method.
|
||||
RoutePath string
|
||||
RouteMethod string
|
||||
|
||||
// URLParams are the stack of routeParams captured during the
|
||||
// routing lifecycle across a stack of sub-routers.
|
||||
URLParams RouteParams
|
||||
|
||||
// Route parameters matched for the current sub-router. It is
|
||||
// intentionally unexported so it can't be tampered.
|
||||
routeParams RouteParams
|
||||
|
||||
// The endpoint routing pattern that matched the request URI path
|
||||
// or `RoutePath` of the current sub-router. This value will update
|
||||
// during the lifecycle of a request passing through a stack of
|
||||
// sub-routers.
|
||||
routePattern string
|
||||
|
||||
// Routing pattern stack throughout the lifecycle of the request,
|
||||
// across all connected routers. It is a record of all matching
|
||||
// patterns across a stack of sub-routers.
|
||||
RoutePatterns []string
|
||||
|
||||
methodsAllowed []methodTyp // allowed methods in case of a 405
|
||||
methodNotAllowed bool
|
||||
}
|
||||
|
||||
// Reset a routing context to its initial state.
|
||||
func (x *Context) Reset() {
|
||||
x.Routes = nil
|
||||
x.RoutePath = ""
|
||||
x.RouteMethod = ""
|
||||
x.RoutePatterns = x.RoutePatterns[:0]
|
||||
x.URLParams.Keys = x.URLParams.Keys[:0]
|
||||
x.URLParams.Values = x.URLParams.Values[:0]
|
||||
|
||||
x.routePattern = ""
|
||||
x.routeParams.Keys = x.routeParams.Keys[:0]
|
||||
x.routeParams.Values = x.routeParams.Values[:0]
|
||||
x.methodNotAllowed = false
|
||||
x.methodsAllowed = x.methodsAllowed[:0]
|
||||
x.parentCtx = nil
|
||||
}
|
||||
|
||||
// URLParam returns the corresponding URL parameter value from the request
|
||||
// routing context.
|
||||
func (x *Context) URLParam(key string) string {
|
||||
for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
|
||||
if x.URLParams.Keys[k] == key {
|
||||
return x.URLParams.Values[k]
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// RoutePattern builds the routing pattern string for the particular
|
||||
// request, at the particular point during routing. This means, the value
|
||||
// will change throughout the execution of a request in a router. That is
|
||||
// why it's advised to only use this value after calling the next handler.
|
||||
//
|
||||
// For example,
|
||||
//
|
||||
// func Instrument(next http.Handler) http.Handler {
|
||||
// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// next.ServeHTTP(w, r)
|
||||
// routePattern := chi.RouteContext(r.Context()).RoutePattern()
|
||||
// measure(w, r, routePattern)
|
||||
// })
|
||||
// }
|
||||
func (x *Context) RoutePattern() string {
|
||||
if x == nil {
|
||||
return ""
|
||||
}
|
||||
routePattern := strings.Join(x.RoutePatterns, "")
|
||||
routePattern = replaceWildcards(routePattern)
|
||||
if routePattern != "/" {
|
||||
routePattern = strings.TrimSuffix(routePattern, "//")
|
||||
routePattern = strings.TrimSuffix(routePattern, "/")
|
||||
}
|
||||
return routePattern
|
||||
}
|
||||
|
||||
// replaceWildcards takes a route pattern and recursively replaces all
|
||||
// occurrences of "/*/" to "/".
|
||||
func replaceWildcards(p string) string {
|
||||
if strings.Contains(p, "/*/") {
|
||||
return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// RouteParams is a structure to track URL routing parameters efficiently.
|
||||
type RouteParams struct {
|
||||
Keys, Values []string
|
||||
}
|
||||
|
||||
// Add will append a URL parameter to the end of the route param
|
||||
func (s *RouteParams) Add(key, value string) {
|
||||
s.Keys = append(s.Keys, key)
|
||||
s.Values = append(s.Values, value)
|
||||
}
|
||||
|
||||
// contextKey is a value for use with context.WithValue. It's used as
|
||||
// a pointer so it fits in an interface{} without allocation. This technique
|
||||
// for defining context keys was copied from Go 1.7's new use of context in net/http.
|
||||
type contextKey struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func (k *contextKey) String() string {
|
||||
return "chi context value " + k.name
|
||||
}
|
||||
527
vendor/github.com/go-chi/chi/v5/mux.go
generated
vendored
Normal file
@@ -0,0 +1,527 @@
|
||||
package chi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var _ Router = &Mux{}
|
||||
|
||||
// Mux is a simple HTTP route multiplexer that parses a request path,
|
||||
// records any URL params, and executes an end handler. It implements
|
||||
// the http.Handler interface and is friendly with the standard library.
|
||||
//
|
||||
// Mux is designed to be fast, minimal and offer a powerful API for building
|
||||
// modular and composable HTTP services with a large set of handlers. It's
|
||||
// particularly useful for writing large REST API services that break a handler
|
||||
// into many smaller parts composed of middlewares and end handlers.
|
||||
type Mux struct {
|
||||
// The computed mux handler made of the chained middleware stack and
|
||||
// the tree router
|
||||
handler http.Handler
|
||||
|
||||
// The radix trie router
|
||||
tree *node
|
||||
|
||||
// Custom method not allowed handler
|
||||
methodNotAllowedHandler http.HandlerFunc
|
||||
|
||||
// A reference to the parent mux used by subrouters when mounting
|
||||
// to a parent mux
|
||||
parent *Mux
|
||||
|
||||
// Routing context pool
|
||||
pool *sync.Pool
|
||||
|
||||
// Custom route not found handler
|
||||
notFoundHandler http.HandlerFunc
|
||||
|
||||
// The middleware stack
|
||||
middlewares []func(http.Handler) http.Handler
|
||||
|
||||
// Controls the behaviour of middleware chain generation when a mux
|
||||
// is registered as an inline group inside another mux.
|
||||
inline bool
|
||||
}
|
||||
|
||||
// NewMux returns a newly initialized Mux object that implements the Router
|
||||
// interface.
|
||||
func NewMux() *Mux {
|
||||
mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
|
||||
mux.pool.New = func() interface{} {
|
||||
return NewRouteContext()
|
||||
}
|
||||
return mux
|
||||
}
|
||||
|
||||
// ServeHTTP is the single method of the http.Handler interface that makes
|
||||
// Mux interoperable with the standard library. It uses a sync.Pool to get and
|
||||
// reuse routing contexts for each request.
|
||||
func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
	// Ensure the mux has some routes defined
|
||||
if mx.handler == nil {
|
||||
mx.NotFoundHandler().ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if a routing context already exists from a parent router.
|
||||
rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
|
||||
if rctx != nil {
|
||||
mx.handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Fetch a RouteContext object from the sync pool, and call the computed
|
||||
// mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
|
||||
// Once the request is finished, reset the routing context and put it back
|
||||
// into the pool for reuse from another request.
|
||||
rctx = mx.pool.Get().(*Context)
|
||||
rctx.Reset()
|
||||
rctx.Routes = mx
|
||||
rctx.parentCtx = r.Context()
|
||||
|
||||
// NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
|
||||
r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
|
||||
|
||||
	// Serve the request and once it's done, put the request context back in the sync pool
|
||||
mx.handler.ServeHTTP(w, r)
|
||||
mx.pool.Put(rctx)
|
||||
}
|
||||
|
||||
// Use appends a middleware handler to the Mux middleware stack.
|
||||
//
|
||||
// The middleware stack for any Mux will execute before searching for a matching
|
||||
// route to a specific handler, which provides opportunity to respond early,
|
||||
// change the course of the request execution, or set request-scoped values for
|
||||
// the next http.Handler.
|
||||
func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
|
||||
if mx.handler != nil {
|
||||
panic("chi: all middlewares must be defined before routes on a mux")
|
||||
}
|
||||
mx.middlewares = append(mx.middlewares, middlewares...)
|
||||
}
|
||||
|
||||
// Handle adds the route `pattern` that matches any http method to
|
||||
// execute the `handler` http.Handler.
|
||||
func (mx *Mux) Handle(pattern string, handler http.Handler) {
|
||||
if method, rest, found := strings.Cut(pattern, " "); found {
|
||||
mx.Method(method, rest, handler)
|
||||
return
|
||||
}
|
||||
|
||||
mx.handle(mALL, pattern, handler)
|
||||
}
|
||||
|
||||
// HandleFunc adds the route `pattern` that matches any http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
|
||||
if method, rest, found := strings.Cut(pattern, " "); found {
|
||||
mx.Method(method, rest, handlerFn)
|
||||
return
|
||||
}
|
||||
|
||||
mx.handle(mALL, pattern, handlerFn)
|
||||
}
|
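Editor's note, not part of the vendored file: as the `strings.Cut` branch above shows, `Handle` and `HandleFunc` also accept a pattern prefixed with an HTTP method. A minimal sketch:

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()

	// "GET /users/{id}" is split on the first space and registered as
	// r.Method("GET", "/users/{id}", ...).
	r.HandleFunc("GET /users/{id}", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(chi.URLParam(r, "id")))
	})

	http.ListenAndServe(":3000", r)
}
```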
||||
|
||||
// Method adds the route `pattern` that matches `method` http method to
|
||||
// execute the `handler` http.Handler.
|
||||
func (mx *Mux) Method(method, pattern string, handler http.Handler) {
|
||||
m, ok := methodMap[strings.ToUpper(method)]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
|
||||
}
|
||||
mx.handle(m, pattern, handler)
|
||||
}
|
||||
|
||||
// MethodFunc adds the route `pattern` that matches `method` http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.Method(method, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Connect adds the route `pattern` that matches a CONNECT http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mCONNECT, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Delete adds the route `pattern` that matches a DELETE http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mDELETE, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Get adds the route `pattern` that matches a GET http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mGET, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Head adds the route `pattern` that matches a HEAD http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mHEAD, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Options adds the route `pattern` that matches an OPTIONS http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mOPTIONS, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Patch adds the route `pattern` that matches a PATCH http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mPATCH, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Post adds the route `pattern` that matches a POST http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mPOST, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Put adds the route `pattern` that matches a PUT http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mPUT, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// Trace adds the route `pattern` that matches a TRACE http method to
|
||||
// execute the `handlerFn` http.HandlerFunc.
|
||||
func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
|
||||
mx.handle(mTRACE, pattern, handlerFn)
|
||||
}
|
||||
|
||||
// NotFound sets a custom http.HandlerFunc for routing paths that could
|
||||
// not be found. The default 404 handler is `http.NotFound`.
|
||||
func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
|
||||
// Build NotFound handler chain
|
||||
m := mx
|
||||
hFn := handlerFn
|
||||
if mx.inline && mx.parent != nil {
|
||||
m = mx.parent
|
||||
hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
|
||||
}
|
||||
|
||||
// Update the notFoundHandler from this point forward
|
||||
m.notFoundHandler = hFn
|
||||
m.updateSubRoutes(func(subMux *Mux) {
|
||||
if subMux.notFoundHandler == nil {
|
||||
subMux.NotFound(hFn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
|
||||
// method is unresolved. The default handler returns a 405 with an empty body.
|
||||
func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
|
||||
// Build MethodNotAllowed handler chain
|
||||
m := mx
|
||||
hFn := handlerFn
|
||||
if mx.inline && mx.parent != nil {
|
||||
m = mx.parent
|
||||
hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
|
||||
}
|
||||
|
||||
// Update the methodNotAllowedHandler from this point forward
|
||||
m.methodNotAllowedHandler = hFn
|
||||
m.updateSubRoutes(func(subMux *Mux) {
|
||||
if subMux.methodNotAllowedHandler == nil {
|
||||
subMux.MethodNotAllowed(hFn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// With adds inline middlewares for an endpoint handler.
|
||||
func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
|
||||
// Similarly as in handle(), we must build the mux handler once additional
|
||||
// middleware registration isn't allowed for this stack, like now.
|
||||
if !mx.inline && mx.handler == nil {
|
||||
mx.updateRouteHandler()
|
||||
}
|
||||
|
||||
// Copy middlewares from parent inline muxs
|
||||
var mws Middlewares
|
||||
if mx.inline {
|
||||
mws = make(Middlewares, len(mx.middlewares))
|
||||
copy(mws, mx.middlewares)
|
||||
}
|
||||
mws = append(mws, middlewares...)
|
||||
|
||||
im := &Mux{
|
||||
pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
|
||||
notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
|
||||
}
|
||||
|
||||
return im
|
||||
}
|
||||
|
||||
// Group creates a new inline-Mux with a copy of middleware stack. It's useful
|
||||
// for a group of handlers along the same routing path that use an additional
|
||||
// set of middlewares. See _examples/.
|
||||
func (mx *Mux) Group(fn func(r Router)) Router {
|
||||
im := mx.With()
|
||||
if fn != nil {
|
||||
fn(im)
|
||||
}
|
||||
return im
|
||||
}
|
||||
|
||||
// Route creates a new Mux and mounts it along the `pattern` as a subrouter.
|
||||
// Effectively, this is a short-hand call to Mount. See _examples/.
|
||||
func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
|
||||
if fn == nil {
|
||||
panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern))
|
||||
}
|
||||
subRouter := NewRouter()
|
||||
fn(subRouter)
|
||||
mx.Mount(pattern, subRouter)
|
||||
return subRouter
|
||||
}
|
||||
|
||||
// Mount attaches another http.Handler or chi Router as a subrouter along a routing
// path. It's very useful to split up a large API as many independent routers and
// compose them as a single service using Mount. See _examples/.
//
// Note that Mount() simply sets a wildcard along the `pattern` that will continue
// routing at the `handler`, which in most cases is another chi.Router. As a result,
// if you define two Mount() routes on the exact same pattern the mount will panic.
func (mx *Mux) Mount(pattern string, handler http.Handler) {
	if handler == nil {
		panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern))
	}

	// Provide runtime safety for ensuring a pattern isn't mounted on an existing
	// routing pattern.
	if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
		panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
	}

	// Assign sub-Router's with the parent not found & method not allowed handler if not specified.
	subr, ok := handler.(*Mux)
	if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
		subr.NotFound(mx.notFoundHandler)
	}
	if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
		subr.MethodNotAllowed(mx.methodNotAllowedHandler)
	}

	mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rctx := RouteContext(r.Context())

		// shift the url path past the previous subrouter
		rctx.RoutePath = mx.nextRoutePath(rctx)

		// reset the wildcard URLParam which connects the subrouter
		n := len(rctx.URLParams.Keys) - 1
		if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n {
			rctx.URLParams.Values[n] = ""
		}

		handler.ServeHTTP(w, r)
	})

	if pattern == "" || pattern[len(pattern)-1] != '/' {
		mx.handle(mALL|mSTUB, pattern, mountHandler)
		mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
		pattern += "/"
	}

	method := mALL
	subroutes, _ := handler.(Routes)
	if subroutes != nil {
		method |= mSTUB
	}
	n := mx.handle(method, pattern+"*", mountHandler)

	if subroutes != nil {
		n.subroutes = subroutes
	}
}
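
As a quick illustration of the Mount() behaviour described above, here is an editorial sketch (not part of the vendored file; it assumes only the exported chi v5 API): a sub-router is attached under a pattern, and mounting the same pattern twice would trip the findPattern panic.

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	admin := chi.NewRouter()
	admin.Get("/users", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("admin users"))
	})

	r := chi.NewRouter()
	// Mount registers /admin, /admin/ and /admin/* on the parent router.
	r.Mount("/admin", admin)
	// A second r.Mount("/admin", ...) would panic, per the check above.

	http.ListenAndServe(":3000", r)
}
```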
|
||||
|
||||
// Routes returns a slice of routing information from the tree,
|
||||
// useful for traversing available routes of a router.
|
||||
func (mx *Mux) Routes() []Route {
|
||||
return mx.tree.routes()
|
||||
}
|
||||
|
||||
// Middlewares returns a slice of middleware handler functions.
|
||||
func (mx *Mux) Middlewares() Middlewares {
|
||||
return mx.middlewares
|
||||
}
|
||||
|
||||
// Match searches the routing tree for a handler that matches the method/path.
// It's similar to routing a http request, but without executing the handler
// thereafter.
//
// Note: the *Context state is updated during execution, so manage
// the state carefully or make a NewRouteContext().
func (mx *Mux) Match(rctx *Context, method, path string) bool {
	return mx.Find(rctx, method, path) != ""
}
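
The note above about *Context state is easy to trip over; a minimal editorial sketch (not part of the vendored file, using only the exported chi v5 API) passes a fresh NewRouteContext() to each lookup:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()
	r.Get("/users/{id}", func(w http.ResponseWriter, r *http.Request) {})

	// Use a fresh routing context for each lookup, as advised above.
	fmt.Println(r.Match(chi.NewRouteContext(), "GET", "/users/42"))  // true
	fmt.Println(r.Match(chi.NewRouteContext(), "POST", "/users/42")) // false
}
```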
|
||||
|
||||
// Find searches the routing tree for the pattern that matches
|
||||
// the method/path.
|
||||
//
|
||||
// Note: the *Context state is updated during execution, so manage
|
||||
// the state carefully or make a NewRouteContext().
|
||||
func (mx *Mux) Find(rctx *Context, method, path string) string {
|
||||
m, ok := methodMap[method]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
node, _, _ := mx.tree.FindRoute(rctx, m, path)
|
||||
pattern := rctx.routePattern
|
||||
|
||||
if node != nil {
|
||||
if node.subroutes == nil {
|
||||
e := node.endpoints[m]
|
||||
return e.pattern
|
||||
}
|
||||
|
||||
rctx.RoutePath = mx.nextRoutePath(rctx)
|
||||
subPattern := node.subroutes.Find(rctx, method, rctx.RoutePath)
|
||||
if subPattern == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
pattern = strings.TrimSuffix(pattern, "/*")
|
||||
pattern += subPattern
|
||||
}
|
||||
|
||||
return pattern
|
||||
}
|
||||
|
||||
// NotFoundHandler returns the default Mux 404 responder whenever a route
|
||||
// cannot be found.
|
||||
func (mx *Mux) NotFoundHandler() http.HandlerFunc {
|
||||
if mx.notFoundHandler != nil {
|
||||
return mx.notFoundHandler
|
||||
}
|
||||
return http.NotFound
|
||||
}
|
||||
|
||||
// MethodNotAllowedHandler returns the default Mux 405 responder whenever
|
||||
// a method cannot be resolved for a route.
|
||||
func (mx *Mux) MethodNotAllowedHandler(methodsAllowed ...methodTyp) http.HandlerFunc {
|
||||
if mx.methodNotAllowedHandler != nil {
|
||||
return mx.methodNotAllowedHandler
|
||||
}
|
||||
return methodNotAllowedHandler(methodsAllowed...)
|
||||
}
|
||||
|
||||
// handle registers a http.Handler in the routing tree for a particular http method
|
||||
// and routing pattern.
|
||||
func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
|
||||
if len(pattern) == 0 || pattern[0] != '/' {
|
||||
panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
|
||||
}
|
||||
|
||||
// Build the computed routing handler for this routing pattern.
|
||||
if !mx.inline && mx.handler == nil {
|
||||
mx.updateRouteHandler()
|
||||
}
|
||||
|
||||
// Build endpoint handler with inline middlewares for the route
|
||||
var h http.Handler
|
||||
if mx.inline {
|
||||
mx.handler = http.HandlerFunc(mx.routeHTTP)
|
||||
h = Chain(mx.middlewares...).Handler(handler)
|
||||
} else {
|
||||
h = handler
|
||||
}
|
||||
|
||||
// Add the endpoint to the tree and return the node
|
||||
return mx.tree.InsertRoute(method, pattern, h)
|
||||
}
|
||||
|
||||
// routeHTTP routes a http.Request through the Mux routing tree to serve
|
||||
// the matching handler for a particular http method.
|
||||
func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Grab the route context object
|
||||
rctx := r.Context().Value(RouteCtxKey).(*Context)
|
||||
|
||||
// The request routing path
|
||||
routePath := rctx.RoutePath
|
||||
if routePath == "" {
|
||||
if r.URL.RawPath != "" {
|
||||
routePath = r.URL.RawPath
|
||||
} else {
|
||||
routePath = r.URL.Path
|
||||
}
|
||||
if routePath == "" {
|
||||
routePath = "/"
|
||||
}
|
||||
}
|
||||
|
||||
// Check if method is supported by chi
|
||||
if rctx.RouteMethod == "" {
|
||||
rctx.RouteMethod = r.Method
|
||||
}
|
||||
method, ok := methodMap[rctx.RouteMethod]
|
||||
if !ok {
|
||||
mx.MethodNotAllowedHandler().ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Find the route
|
||||
if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
|
||||
if supportsPathValue {
|
||||
setPathValue(rctx, r)
|
||||
}
|
||||
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
if rctx.methodNotAllowed {
|
||||
mx.MethodNotAllowedHandler(rctx.methodsAllowed...).ServeHTTP(w, r)
|
||||
} else {
|
||||
mx.NotFoundHandler().ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func (mx *Mux) nextRoutePath(rctx *Context) string {
|
||||
routePath := "/"
|
||||
nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
|
||||
if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
|
||||
routePath = "/" + rctx.routeParams.Values[nx]
|
||||
}
|
||||
return routePath
|
||||
}
|
||||
|
||||
// Recursively update data on child routers.
|
||||
func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
|
||||
for _, r := range mx.tree.routes() {
|
||||
subMux, ok := r.SubRoutes.(*Mux)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
fn(subMux)
|
||||
}
|
||||
}
|
||||
|
||||
// updateRouteHandler builds the single mux handler that is a chain of the middleware
|
||||
// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
|
||||
// point, no other middlewares can be registered on this Mux's stack. But you can still
|
||||
// compose additional middlewares via Group()'s or using a chained middleware handler.
|
||||
func (mx *Mux) updateRouteHandler() {
|
||||
mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
|
||||
}
|
||||
|
||||
// methodNotAllowedHandler is a helper function to respond with a 405,
|
||||
// method not allowed. It sets the Allow header with the list of allowed
|
||||
// methods for the route.
|
||||
func methodNotAllowedHandler(methodsAllowed ...methodTyp) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
for _, m := range methodsAllowed {
|
||||
w.Header().Add("Allow", reverseMethodMap[m])
|
||||
}
|
||||
w.WriteHeader(405)
|
||||
w.Write(nil)
|
||||
}
|
||||
}
|
||||
20
vendor/github.com/go-chi/chi/v5/path_value.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
//go:build go1.22 && !tinygo
// +build go1.22,!tinygo

package chi

import "net/http"

// supportsPathValue is true if the Go version is 1.22 and above.
//
// If this is true, `net/http.Request` has methods `SetPathValue` and `PathValue`.
const supportsPathValue = true

// setPathValue sets the path values in the Request value
// based on the provided request context.
func setPathValue(rctx *Context, r *http.Request) {
	for i, key := range rctx.URLParams.Keys {
		value := rctx.URLParams.Values[i]
		r.SetPathValue(key, value)
	}
}
|
||||
19
vendor/github.com/go-chi/chi/v5/path_value_fallback.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
//go:build !go1.22 || tinygo
// +build !go1.22 tinygo

package chi

import "net/http"

// supportsPathValue is true if the Go version is 1.22 and above.
//
// If this is true, `net/http.Request` has methods `SetPathValue` and `PathValue`.
const supportsPathValue = false

// setPathValue sets the path values in the Request value
// based on the provided request context.
//
// setPathValue is only supported in Go 1.22 and above so
// this is just a blank function so that it compiles.
func setPathValue(rctx *Context, r *http.Request) {
}
|
||||
890
vendor/github.com/go-chi/chi/v5/tree.go
generated
vendored
Normal file
@@ -0,0 +1,890 @@
|
||||
package chi
|
||||
|
||||
// Radix tree implementation below is a based on the original work by
|
||||
// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
|
||||
// (MIT licensed). It's been heavily modified for use as a HTTP routing tree.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type methodTyp uint
|
||||
|
||||
const (
|
||||
mSTUB methodTyp = 1 << iota
|
||||
mCONNECT
|
||||
mDELETE
|
||||
mGET
|
||||
mHEAD
|
||||
mOPTIONS
|
||||
mPATCH
|
||||
mPOST
|
||||
mPUT
|
||||
mTRACE
|
||||
)
|
||||
|
||||
var mALL = mCONNECT | mDELETE | mGET | mHEAD |
|
||||
mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
|
||||
|
||||
var methodMap = map[string]methodTyp{
|
||||
http.MethodConnect: mCONNECT,
|
||||
http.MethodDelete: mDELETE,
|
||||
http.MethodGet: mGET,
|
||||
http.MethodHead: mHEAD,
|
||||
http.MethodOptions: mOPTIONS,
|
||||
http.MethodPatch: mPATCH,
|
||||
http.MethodPost: mPOST,
|
||||
http.MethodPut: mPUT,
|
||||
http.MethodTrace: mTRACE,
|
||||
}
|
||||
|
||||
var reverseMethodMap = map[methodTyp]string{
|
||||
mCONNECT: http.MethodConnect,
|
||||
mDELETE: http.MethodDelete,
|
||||
mGET: http.MethodGet,
|
||||
mHEAD: http.MethodHead,
|
||||
mOPTIONS: http.MethodOptions,
|
||||
mPATCH: http.MethodPatch,
|
||||
mPOST: http.MethodPost,
|
||||
mPUT: http.MethodPut,
|
||||
mTRACE: http.MethodTrace,
|
||||
}
|
||||
|
||||
// RegisterMethod adds support for custom HTTP method handlers, available
// via Router#Method and Router#MethodFunc
func RegisterMethod(method string) {
	if method == "" {
		return
	}
	method = strings.ToUpper(method)
	if _, ok := methodMap[method]; ok {
		return
	}
	n := len(methodMap)
	if n > strconv.IntSize-2 {
		panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
	}
	mt := methodTyp(2 << n)
	methodMap[method] = mt
	mALL |= mt
}
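
A short usage sketch for RegisterMethod (editorial, not part of the vendored file; it assumes the exported chi v5 API): register the custom method up front, then route it with MethodFunc.

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	// Register the custom method before declaring routes that use it.
	chi.RegisterMethod("LINK")

	r := chi.NewRouter()
	r.MethodFunc("LINK", "/resources/{id}", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("linked " + chi.URLParam(r, "id")))
	})

	http.ListenAndServe(":3000", r)
}
```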
|
||||
|
||||
type nodeTyp uint8
|
||||
|
||||
const (
|
||||
ntStatic nodeTyp = iota // /home
|
||||
ntRegexp // /{id:[0-9]+}
|
||||
ntParam // /{user}
|
||||
ntCatchAll // /api/v1/*
|
||||
)
|
||||
|
||||
type node struct {
|
||||
// subroutes on the leaf node
|
||||
subroutes Routes
|
||||
|
||||
// regexp matcher for regexp nodes
|
||||
rex *regexp.Regexp
|
||||
|
||||
// HTTP handler endpoints on the leaf node
|
||||
endpoints endpoints
|
||||
|
||||
// prefix is the common prefix we ignore
|
||||
prefix string
|
||||
|
||||
// child nodes should be stored in-order for iteration,
|
||||
// in groups of the node type.
|
||||
children [ntCatchAll + 1]nodes
|
||||
|
||||
// first byte of the child prefix
|
||||
tail byte
|
||||
|
||||
// node type: static, regexp, param, catchAll
|
||||
typ nodeTyp
|
||||
|
||||
// first byte of the prefix
|
||||
label byte
|
||||
}
|
||||
|
||||
// endpoints is a mapping of http method constants to handlers
|
||||
// for a given route.
|
||||
type endpoints map[methodTyp]*endpoint
|
||||
|
||||
type endpoint struct {
|
||||
// endpoint handler
|
||||
handler http.Handler
|
||||
|
||||
// pattern is the routing pattern for handler nodes
|
||||
pattern string
|
||||
|
||||
// parameter keys recorded on handler nodes
|
||||
paramKeys []string
|
||||
}
|
||||
|
||||
func (s endpoints) Value(method methodTyp) *endpoint {
|
||||
mh, ok := s[method]
|
||||
if !ok {
|
||||
mh = &endpoint{}
|
||||
s[method] = mh
|
||||
}
|
||||
return mh
|
||||
}
|
||||
|
||||
func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
|
||||
var parent *node
|
||||
search := pattern
|
||||
|
||||
for {
|
||||
// Handle key exhaustion
|
||||
if len(search) == 0 {
|
||||
// Insert or update the node's leaf handler
|
||||
n.setEndpoint(method, handler, pattern)
|
||||
return n
|
||||
}
|
||||
|
||||
// We're going to be searching for a wild node next,
|
||||
// in this case, we need to get the tail
|
||||
var label = search[0]
|
||||
var segTail byte
|
||||
var segEndIdx int
|
||||
var segTyp nodeTyp
|
||||
var segRexpat string
|
||||
if label == '{' || label == '*' {
|
||||
segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
|
||||
}
|
||||
|
||||
var prefix string
|
||||
if segTyp == ntRegexp {
|
||||
prefix = segRexpat
|
||||
}
|
||||
|
||||
// Look for the edge to attach to
|
||||
parent = n
|
||||
n = n.getEdge(segTyp, label, segTail, prefix)
|
||||
|
||||
// No edge, create one
|
||||
if n == nil {
|
||||
child := &node{label: label, tail: segTail, prefix: search}
|
||||
hn := parent.addChild(child, search)
|
||||
hn.setEndpoint(method, handler, pattern)
|
||||
|
||||
return hn
|
||||
}
|
||||
|
||||
// Found an edge to match the pattern
|
||||
|
||||
if n.typ > ntStatic {
|
||||
// We found a param node, trim the param from the search path and continue.
|
||||
// This param/wild pattern segment would already be on the tree from a previous
|
||||
// call to addChild when creating a new node.
|
||||
search = search[segEndIdx:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Static nodes fall below here.
|
||||
// Determine longest prefix of the search key on match.
|
||||
commonPrefix := longestPrefix(search, n.prefix)
|
||||
if commonPrefix == len(n.prefix) {
|
||||
// the common prefix is as long as the current node's prefix we're attempting to insert.
|
||||
// keep the search going.
|
||||
search = search[commonPrefix:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Split the node
|
||||
child := &node{
|
||||
typ: ntStatic,
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
parent.replaceChild(search[0], segTail, child)
|
||||
|
||||
// Restore the existing node
|
||||
n.label = n.prefix[commonPrefix]
|
||||
n.prefix = n.prefix[commonPrefix:]
|
||||
child.addChild(n, n.prefix)
|
||||
|
||||
// If the new key is a subset, set the method/handler on this node and finish.
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
child.setEndpoint(method, handler, pattern)
|
||||
return child
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
subchild := &node{
|
||||
typ: ntStatic,
|
||||
label: search[0],
|
||||
prefix: search,
|
||||
}
|
||||
hn := child.addChild(subchild, search)
|
||||
hn.setEndpoint(method, handler, pattern)
|
||||
return hn
|
||||
}
|
||||
}
|
||||
|
||||
// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
|
||||
// For a URL router like chi's, we split the static, param, regexp and wildcard segments
|
||||
// into different nodes. In addition, addChild will recursively call itself until every
|
||||
// pattern segment is added to the url pattern tree as individual nodes, depending on type.
|
||||
func (n *node) addChild(child *node, prefix string) *node {
|
||||
search := prefix
|
||||
|
||||
// handler leaf node added to the tree is the child.
|
||||
// this may be overridden later down the flow
|
||||
hn := child
|
||||
|
||||
// Parse next segment
|
||||
segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
|
||||
|
||||
// Add child depending on next up segment
|
||||
switch segTyp {
|
||||
|
||||
case ntStatic:
|
||||
// Search prefix is all static (that is, has no params in path)
|
||||
// noop
|
||||
|
||||
default:
|
||||
// Search prefix contains a param, regexp or wildcard
|
||||
|
||||
if segTyp == ntRegexp {
|
||||
rex, err := regexp.Compile(segRexpat)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
|
||||
}
|
||||
child.prefix = segRexpat
|
||||
child.rex = rex
|
||||
}
|
||||
|
||||
if segStartIdx == 0 {
|
||||
// Route starts with a param
|
||||
child.typ = segTyp
|
||||
|
||||
if segTyp == ntCatchAll {
|
||||
segStartIdx = -1
|
||||
} else {
|
||||
segStartIdx = segEndIdx
|
||||
}
|
||||
if segStartIdx < 0 {
|
||||
segStartIdx = len(search)
|
||||
}
|
||||
child.tail = segTail // for params, we set the tail
|
||||
|
||||
if segStartIdx != len(search) {
|
||||
// add static edge for the remaining part, split the end.
|
||||
// its not possible to have adjacent param nodes, so its certainly
|
||||
// going to be a static node next.
|
||||
|
||||
search = search[segStartIdx:] // advance search position
|
||||
|
||||
nn := &node{
|
||||
typ: ntStatic,
|
||||
label: search[0],
|
||||
prefix: search,
|
||||
}
|
||||
hn = child.addChild(nn, search)
|
||||
}
|
||||
|
||||
} else if segStartIdx > 0 {
|
||||
// Route has some param
|
||||
|
||||
// starts with a static segment
|
||||
child.typ = ntStatic
|
||||
child.prefix = search[:segStartIdx]
|
||||
child.rex = nil
|
||||
|
||||
// add the param edge node
|
||||
search = search[segStartIdx:]
|
||||
|
||||
nn := &node{
|
||||
typ: segTyp,
|
||||
label: search[0],
|
||||
tail: segTail,
|
||||
}
|
||||
hn = child.addChild(nn, search)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
n.children[child.typ] = append(n.children[child.typ], child)
|
||||
n.children[child.typ].Sort()
|
||||
return hn
|
||||
}
|
||||
|
||||
func (n *node) replaceChild(label, tail byte, child *node) {
|
||||
for i := 0; i < len(n.children[child.typ]); i++ {
|
||||
if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
|
||||
n.children[child.typ][i] = child
|
||||
n.children[child.typ][i].label = label
|
||||
n.children[child.typ][i].tail = tail
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("chi: replacing missing child")
|
||||
}
|
||||
|
||||
func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
|
||||
nds := n.children[ntyp]
|
||||
for i := 0; i < len(nds); i++ {
|
||||
if nds[i].label == label && nds[i].tail == tail {
|
||||
if ntyp == ntRegexp && nds[i].prefix != prefix {
|
||||
continue
|
||||
}
|
||||
return nds[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
|
||||
// Set the handler for the method type on the node
|
||||
if n.endpoints == nil {
|
||||
n.endpoints = make(endpoints)
|
||||
}
|
||||
|
||||
paramKeys := patParamKeys(pattern)
|
||||
|
||||
if method&mSTUB == mSTUB {
|
||||
n.endpoints.Value(mSTUB).handler = handler
|
||||
}
|
||||
if method&mALL == mALL {
|
||||
h := n.endpoints.Value(mALL)
|
||||
h.handler = handler
|
||||
h.pattern = pattern
|
||||
h.paramKeys = paramKeys
|
||||
for _, m := range methodMap {
|
||||
h := n.endpoints.Value(m)
|
||||
h.handler = handler
|
||||
h.pattern = pattern
|
||||
h.paramKeys = paramKeys
|
||||
}
|
||||
} else {
|
||||
h := n.endpoints.Value(method)
|
||||
h.handler = handler
|
||||
h.pattern = pattern
|
||||
h.paramKeys = paramKeys
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
|
||||
// Reset the context routing pattern and params
|
||||
rctx.routePattern = ""
|
||||
rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
|
||||
rctx.routeParams.Values = rctx.routeParams.Values[:0]
|
||||
|
||||
// Find the routing handlers for the path
|
||||
rn := n.findRoute(rctx, method, path)
|
||||
if rn == nil {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// Record the routing params in the request lifecycle
|
||||
rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
|
||||
rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
|
||||
|
||||
// Record the routing pattern in the request lifecycle
|
||||
if rn.endpoints[method].pattern != "" {
|
||||
rctx.routePattern = rn.endpoints[method].pattern
|
||||
rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
|
||||
}
|
||||
|
||||
return rn, rn.endpoints, rn.endpoints[method].handler
|
||||
}
|
||||
|
||||
// Recursive edge traversal by checking all nodeTyp groups along the way.
|
||||
// It's like searching through a multi-dimensional radix trie.
|
||||
func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
|
||||
nn := n
|
||||
search := path
|
||||
|
||||
for t, nds := range nn.children {
|
||||
ntyp := nodeTyp(t)
|
||||
if len(nds) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var xn *node
|
||||
xsearch := search
|
||||
|
||||
var label byte
|
||||
if search != "" {
|
||||
label = search[0]
|
||||
}
|
||||
|
||||
switch ntyp {
|
||||
case ntStatic:
|
||||
xn = nds.findEdge(label)
|
||||
if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
|
||||
continue
|
||||
}
|
||||
xsearch = xsearch[len(xn.prefix):]
|
||||
|
||||
case ntParam, ntRegexp:
|
||||
// short-circuit and return no matching route for empty param values
|
||||
if xsearch == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// serially loop through each node grouped by the tail delimiter
|
||||
for idx := 0; idx < len(nds); idx++ {
|
||||
xn = nds[idx]
|
||||
|
||||
// label for param nodes is the delimiter byte
|
||||
p := strings.IndexByte(xsearch, xn.tail)
|
||||
|
||||
if p < 0 {
|
||||
if xn.tail == '/' {
|
||||
p = len(xsearch)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
} else if ntyp == ntRegexp && p == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if ntyp == ntRegexp && xn.rex != nil {
|
||||
if !xn.rex.MatchString(xsearch[:p]) {
|
||||
continue
|
||||
}
|
||||
} else if strings.IndexByte(xsearch[:p], '/') != -1 {
|
||||
// avoid a match across path segments
|
||||
continue
|
||||
}
|
||||
|
||||
prevlen := len(rctx.routeParams.Values)
|
||||
rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
|
||||
xsearch = xsearch[p:]
|
||||
|
||||
if len(xsearch) == 0 {
|
||||
if xn.isLeaf() {
|
||||
h := xn.endpoints[method]
|
||||
if h != nil && h.handler != nil {
|
||||
rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
|
||||
return xn
|
||||
}
|
||||
|
||||
for endpoints := range xn.endpoints {
|
||||
if endpoints == mALL || endpoints == mSTUB {
|
||||
continue
|
||||
}
|
||||
rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints)
|
||||
}
|
||||
|
||||
// flag that the routing context found a route, but not a corresponding
|
||||
// supported method
|
||||
rctx.methodNotAllowed = true
|
||||
}
|
||||
}
|
||||
|
||||
// recursively find the next node on this branch
|
||||
fin := xn.findRoute(rctx, method, xsearch)
|
||||
if fin != nil {
|
||||
return fin
|
||||
}
|
||||
|
||||
// not found on this branch, reset vars
|
||||
rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
|
||||
xsearch = search
|
||||
}
|
||||
|
||||
rctx.routeParams.Values = append(rctx.routeParams.Values, "")
|
||||
|
||||
default:
|
||||
// catch-all nodes
|
||||
rctx.routeParams.Values = append(rctx.routeParams.Values, search)
|
||||
xn = nds[0]
|
||||
xsearch = ""
|
||||
}
|
||||
|
||||
if xn == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// did we find it yet?
|
||||
if len(xsearch) == 0 {
|
||||
if xn.isLeaf() {
|
||||
h := xn.endpoints[method]
|
||||
if h != nil && h.handler != nil {
|
||||
rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
|
||||
return xn
|
||||
}
|
||||
|
||||
for endpoints := range xn.endpoints {
|
||||
if endpoints == mALL || endpoints == mSTUB {
|
||||
continue
|
||||
}
|
||||
rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints)
|
||||
}
|
||||
|
||||
// flag that the routing context found a route, but not a corresponding
|
||||
// supported method
|
||||
rctx.methodNotAllowed = true
|
||||
}
|
||||
}
|
||||
|
||||
// recursively find the next node..
|
||||
fin := xn.findRoute(rctx, method, xsearch)
|
||||
if fin != nil {
|
||||
return fin
|
||||
}
|
||||
|
||||
// Did not find final handler, let's remove the param here if it was set
|
||||
if xn.typ > ntStatic {
|
||||
if len(rctx.routeParams.Values) > 0 {
|
||||
rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
|
||||
nds := n.children[ntyp]
|
||||
num := len(nds)
|
||||
idx := 0
|
||||
|
||||
switch ntyp {
|
||||
case ntStatic, ntParam, ntRegexp:
|
||||
i, j := 0, num-1
|
||||
for i <= j {
|
||||
idx = i + (j-i)/2
|
||||
if label > nds[idx].label {
|
||||
i = idx + 1
|
||||
} else if label < nds[idx].label {
|
||||
j = idx - 1
|
||||
} else {
|
||||
i = num // breaks cond
|
||||
}
|
||||
}
|
||||
if nds[idx].label != label {
|
||||
return nil
|
||||
}
|
||||
return nds[idx]
|
||||
|
||||
default: // catch all
|
||||
return nds[idx]
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) isLeaf() bool {
|
||||
return n.endpoints != nil
|
||||
}
|
||||
|
||||
func (n *node) findPattern(pattern string) bool {
|
||||
nn := n
|
||||
for _, nds := range nn.children {
|
||||
if len(nds) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
n = nn.findEdge(nds[0].typ, pattern[0])
|
||||
if n == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var idx int
|
||||
var xpattern string
|
||||
|
||||
switch n.typ {
|
||||
case ntStatic:
|
||||
idx = longestPrefix(pattern, n.prefix)
|
||||
if idx < len(n.prefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
case ntParam, ntRegexp:
|
||||
idx = strings.IndexByte(pattern, '}') + 1
|
||||
|
||||
case ntCatchAll:
|
||||
idx = longestPrefix(pattern, "*")
|
||||
|
||||
default:
|
||||
panic("chi: unknown node type")
|
||||
}
|
||||
|
||||
xpattern = pattern[idx:]
|
||||
if len(xpattern) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return n.findPattern(xpattern)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *node) routes() []Route {
|
||||
rts := []Route{}
|
||||
|
||||
n.walk(func(eps endpoints, subroutes Routes) bool {
|
||||
if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Group methodHandlers by unique patterns
|
||||
pats := make(map[string]endpoints)
|
||||
|
||||
for mt, h := range eps {
|
||||
if h.pattern == "" {
|
||||
continue
|
||||
}
|
||||
p, ok := pats[h.pattern]
|
||||
if !ok {
|
||||
p = endpoints{}
|
||||
pats[h.pattern] = p
|
||||
}
|
||||
p[mt] = h
|
||||
}
|
||||
|
||||
for p, mh := range pats {
|
||||
hs := make(map[string]http.Handler)
|
||||
if mh[mALL] != nil && mh[mALL].handler != nil {
|
||||
hs["*"] = mh[mALL].handler
|
||||
}
|
||||
|
||||
for mt, h := range mh {
|
||||
if h.handler == nil {
|
||||
continue
|
||||
}
|
||||
m := methodTypString(mt)
|
||||
if m == "" {
|
||||
continue
|
||||
}
|
||||
hs[m] = h.handler
|
||||
}
|
||||
|
||||
rt := Route{subroutes, hs, p}
|
||||
rts = append(rts, rt)
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
|
||||
return rts
|
||||
}
|
||||
|
||||
func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
|
||||
// Visit the leaf values if any
|
||||
if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, ns := range n.children {
|
||||
for _, cn := range ns {
|
||||
if cn.walk(fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// patNextSegment returns the next segment details from a pattern:
|
||||
// node type, param key, regexp string, param tail byte, param starting index, param ending index
|
||||
func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
|
||||
ps := strings.Index(pattern, "{")
|
||||
ws := strings.Index(pattern, "*")
|
||||
|
||||
if ps < 0 && ws < 0 {
|
||||
return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
|
||||
}
|
||||
|
||||
// Sanity check
|
||||
if ps >= 0 && ws >= 0 && ws < ps {
|
||||
panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
|
||||
}
|
||||
|
||||
var tail byte = '/' // Default endpoint tail to / byte
|
||||
|
||||
if ps >= 0 {
|
||||
// Param/Regexp pattern is next
|
||||
nt := ntParam
|
||||
|
||||
// Read to closing } taking into account opens and closes in curl count (cc)
|
||||
cc := 0
|
||||
pe := ps
|
||||
for i, c := range pattern[ps:] {
|
||||
if c == '{' {
|
||||
cc++
|
||||
} else if c == '}' {
|
||||
cc--
|
||||
if cc == 0 {
|
||||
pe = ps + i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if pe == ps {
|
||||
panic("chi: route param closing delimiter '}' is missing")
|
||||
}
|
||||
|
||||
key := pattern[ps+1 : pe]
|
||||
pe++ // set end to next position
|
||||
|
||||
if pe < len(pattern) {
|
||||
tail = pattern[pe]
|
||||
}
|
||||
|
||||
key, rexpat, isRegexp := strings.Cut(key, ":")
|
||||
if isRegexp {
|
||||
nt = ntRegexp
|
||||
}
|
||||
|
||||
if len(rexpat) > 0 {
|
||||
if rexpat[0] != '^' {
|
||||
rexpat = "^" + rexpat
|
||||
}
|
||||
if rexpat[len(rexpat)-1] != '$' {
|
||||
rexpat += "$"
|
||||
}
|
||||
}
|
||||
|
||||
return nt, key, rexpat, tail, ps, pe
|
||||
}
|
||||
|
||||
// Wildcard pattern as finale
|
||||
if ws < len(pattern)-1 {
|
||||
panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
|
||||
}
|
||||
return ntCatchAll, "*", "", 0, ws, len(pattern)
|
||||
}
|
||||
|
||||
func patParamKeys(pattern string) []string {
|
||||
pat := pattern
|
||||
paramKeys := []string{}
|
||||
for {
|
||||
ptyp, paramKey, _, _, _, e := patNextSegment(pat)
|
||||
if ptyp == ntStatic {
|
||||
return paramKeys
|
||||
}
|
||||
for i := 0; i < len(paramKeys); i++ {
|
||||
if paramKeys[i] == paramKey {
|
||||
panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
|
||||
}
|
||||
}
|
||||
paramKeys = append(paramKeys, paramKey)
|
||||
pat = pat[e:]
|
||||
}
|
||||
}
|
||||
|
||||
// longestPrefix finds the length of the shared prefix
|
||||
// of two strings
|
||||
func longestPrefix(k1, k2 string) int {
|
||||
max := len(k1)
|
||||
if l := len(k2); l < max {
|
||||
max = l
|
||||
}
|
||||
var i int
|
||||
for i = 0; i < max; i++ {
|
||||
if k1[i] != k2[i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func methodTypString(method methodTyp) string {
|
||||
for s, t := range methodMap {
|
||||
if method == t {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type nodes []*node
|
||||
|
||||
// Sort the list of nodes by label
|
||||
func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
|
||||
func (ns nodes) Len() int { return len(ns) }
|
||||
func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
|
||||
func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
|
||||
|
||||
// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
|
||||
// The list order determines the traversal order.
|
||||
func (ns nodes) tailSort() {
|
||||
for i := len(ns) - 1; i >= 0; i-- {
|
||||
if ns[i].typ > ntStatic && ns[i].tail == '/' {
|
||||
ns.Swap(i, len(ns)-1)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ns nodes) findEdge(label byte) *node {
|
||||
num := len(ns)
|
||||
idx := 0
|
||||
i, j := 0, num-1
|
||||
for i <= j {
|
||||
idx = i + (j-i)/2
|
||||
if label > ns[idx].label {
|
||||
i = idx + 1
|
||||
} else if label < ns[idx].label {
|
||||
j = idx - 1
|
||||
} else {
|
||||
i = num // breaks cond
|
||||
}
|
||||
}
|
||||
if ns[idx].label != label {
|
||||
return nil
|
||||
}
|
||||
return ns[idx]
|
||||
}
|
||||
|
||||
// Route describes the details of a routing handler.
|
||||
// Handlers map key is an HTTP method
|
||||
type Route struct {
|
||||
SubRoutes Routes
|
||||
Handlers map[string]http.Handler
|
||||
Pattern string
|
||||
}
|
||||
|
||||
// WalkFunc is the type of the function called for each method and route visited by Walk.
|
||||
type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
|
||||
|
||||
// Walk walks any router tree that implements Routes interface.
|
||||
func Walk(r Routes, walkFn WalkFunc) error {
|
||||
return walk(r, walkFn, "")
|
||||
}
|
||||
|
||||
func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
|
||||
for _, route := range r.Routes() {
|
||||
mws := make([]func(http.Handler) http.Handler, len(parentMw))
|
||||
copy(mws, parentMw)
|
||||
mws = append(mws, r.Middlewares()...)
|
||||
|
||||
if route.SubRoutes != nil {
|
||||
if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
for method, handler := range route.Handlers {
|
||||
if method == "*" {
|
||||
// Ignore a "catchAll" method, since we pass down all the specific methods for each route.
|
||||
continue
|
||||
}
|
||||
|
||||
fullRoute := parentRoute + route.Pattern
|
||||
fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
|
||||
|
||||
if chain, ok := handler.(*ChainHandler); ok {
|
||||
if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := walkFn(method, fullRoute, handler, mws...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
21
vendor/github.com/go-chi/cors/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
Copyright (c) 2014 Olivier Poitrey <rs@dailymotion.com>
|
||||
Copyright (c) 2016-Present https://github.com/go-chi authors
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
43
vendor/github.com/go-chi/cors/README.md
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
# CORS net/http middleware

[go-chi/cors](https://github.com/go-chi/cors) is a fork of [github.com/rs/cors](https://github.com/rs/cors) that
provides a `net/http` compatible middleware for performing preflight CORS checks on the server side. These headers
are required for using the browser native [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).

This middleware is designed to be used as a top-level middleware on the [chi](https://github.com/go-chi/chi) router.
Applying it within a `r.Group()` or using `With()` will not work without routes matching `OPTIONS` added.

## Install

`go get github.com/go-chi/cors`

## Usage

```go
func main() {
	r := chi.NewRouter()

	// Basic CORS
	// for more ideas, see: https://developer.github.com/v3/#cross-origin-resource-sharing
	r.Use(cors.Handler(cors.Options{
		// AllowedOrigins:   []string{"https://foo.com"}, // Use this to allow specific origin hosts
		AllowedOrigins: []string{"https://*", "http://*"},
		// AllowOriginFunc:  func(r *http.Request, origin string) bool { return true },
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
		AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
		ExposedHeaders:   []string{"Link"},
		AllowCredentials: false,
		MaxAge:           300, // Maximum value not ignored by any of major browsers
	}))

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("welcome"))
	})

	http.ListenAndServe(":3000", r)
}
```
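
As noted above, applying the middleware inside `r.Group()` or via `With()` only wraps matched routes, so preflight requests need an explicit `OPTIONS` route to reach the CORS handler at all. A minimal sketch of that setup (illustrative, using the same chi and cors APIs as the example above):

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/cors"
)

func main() {
	r := chi.NewRouter()

	r.Group(func(r chi.Router) {
		r.Use(cors.Handler(cors.Options{AllowedOrigins: []string{"https://*"}}))
		// Preflight requests must match a route for the group's middleware to run.
		r.Options("/ping", func(w http.ResponseWriter, r *http.Request) {})
		r.Get("/ping", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("pong"))
		})
	})

	http.ListenAndServe(":3000", r)
}
```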

## Credits

All credit for the original work of this middleware goes out to [github.com/rs](https://github.com/rs).
402
vendor/github.com/go-chi/cors/cors.go
generated
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
// cors package is net/http handler to handle CORS related requests
// as defined by http://www.w3.org/TR/cors/
//
// You can configure it by passing an option struct to cors.New:
//
//	c := cors.New(cors.Options{
//	    AllowedOrigins:   []string{"foo.com"},
//	    AllowedMethods:   []string{"GET", "POST", "DELETE"},
//	    AllowCredentials: true,
//	})
//
// Then insert the handler in the chain:
//
//	handler = c.Handler(handler)
//
// See Options documentation for more options.
//
// The resulting handler is a standard net/http handler.
package cors
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Options is a configuration container to setup the CORS middleware.
|
||||
type Options struct {
|
||||
// AllowedOrigins is a list of origins a cross-domain request can be executed from.
|
||||
// If the special "*" value is present in the list, all origins will be allowed.
|
||||
// An origin may contain a wildcard (*) to replace 0 or more characters
|
||||
// (i.e.: http://*.domain.com). Usage of wildcards implies a small performance penalty.
|
||||
// Only one wildcard can be used per origin.
|
||||
// Default value is ["*"]
|
||||
AllowedOrigins []string
|
||||
|
||||
// AllowOriginFunc is a custom function to validate the origin. It takes the origin
|
||||
// as argument and returns true if allowed or false otherwise. If this option is
|
||||
// set, the content of AllowedOrigins is ignored.
|
||||
AllowOriginFunc func(r *http.Request, origin string) bool
|
||||
|
||||
// AllowedMethods is a list of methods the client is allowed to use with
|
||||
// cross-domain requests. Default value is simple methods (HEAD, GET and POST).
|
||||
AllowedMethods []string
|
||||
|
||||
// AllowedHeaders is list of non simple headers the client is allowed to use with
|
||||
// cross-domain requests.
|
||||
// If the special "*" value is present in the list, all headers will be allowed.
|
||||
// Default value is [] but "Origin" is always appended to the list.
|
||||
AllowedHeaders []string
|
||||
|
||||
// ExposedHeaders indicates which headers are safe to expose to the API of a CORS
|
||||
// API specification
|
||||
ExposedHeaders []string
|
||||
|
||||
// AllowCredentials indicates whether the request can include user credentials like
|
||||
// cookies, HTTP authentication or client side SSL certificates.
|
||||
AllowCredentials bool
|
||||
|
||||
// MaxAge indicates how long (in seconds) the results of a preflight request
|
||||
// can be cached
|
||||
MaxAge int
|
||||
|
||||
// OptionsPassthrough instructs preflight to let other potential next handlers to
|
||||
// process the OPTIONS method. Turn this on if your application handles OPTIONS.
|
||||
OptionsPassthrough bool
|
||||
|
||||
// Debugging flag adds additional output to debug server side CORS issues
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// Logger generic interface for logger
|
||||
type Logger interface {
|
||||
Printf(string, ...interface{})
|
||||
}
|
||||
|
||||
// Cors http handler
|
||||
type Cors struct {
|
||||
// Debug logger
|
||||
Log Logger
|
||||
|
||||
// Normalized list of plain allowed origins
|
||||
allowedOrigins []string
|
||||
|
||||
// List of allowed origins containing wildcards
|
||||
allowedWOrigins []wildcard
|
||||
|
||||
// Optional origin validator function
|
||||
allowOriginFunc func(r *http.Request, origin string) bool
|
||||
|
||||
// Normalized list of allowed headers
|
||||
allowedHeaders []string
|
||||
|
||||
// Normalized list of allowed methods
|
||||
allowedMethods []string
|
||||
|
||||
// Normalized list of exposed headers
|
||||
exposedHeaders []string
|
||||
maxAge int
|
||||
|
||||
// Set to true when allowed origins contains a "*"
|
||||
allowedOriginsAll bool
|
||||
|
||||
// Set to true when allowed headers contains a "*"
|
||||
allowedHeadersAll bool
|
||||
|
||||
allowCredentials bool
|
||||
optionPassthrough bool
|
||||
}
|
||||
|
||||
// New creates a new Cors handler with the provided options.
|
||||
func New(options Options) *Cors {
|
||||
c := &Cors{
|
||||
exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey),
|
||||
allowOriginFunc: options.AllowOriginFunc,
|
||||
allowCredentials: options.AllowCredentials,
|
||||
maxAge: options.MaxAge,
|
||||
optionPassthrough: options.OptionsPassthrough,
|
||||
}
|
||||
if options.Debug && c.Log == nil {
|
||||
c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags)
|
||||
}
|
||||
|
||||
// Normalize options
|
||||
// Note: for origins and methods matching, the spec requires a case-sensitive matching.
|
||||
// As it may error prone, we chose to ignore the spec here.
|
||||
|
||||
// Allowed Origins
|
||||
if len(options.AllowedOrigins) == 0 {
|
||||
if options.AllowOriginFunc == nil {
|
||||
// Default is all origins
|
||||
c.allowedOriginsAll = true
|
||||
}
|
||||
} else {
|
||||
c.allowedOrigins = []string{}
|
||||
c.allowedWOrigins = []wildcard{}
|
||||
for _, origin := range options.AllowedOrigins {
|
||||
// Normalize
|
||||
origin = strings.ToLower(origin)
|
||||
if origin == "*" {
|
||||
// If "*" is present in the list, turn the whole list into a match all
|
||||
c.allowedOriginsAll = true
|
||||
c.allowedOrigins = nil
|
||||
c.allowedWOrigins = nil
|
||||
break
|
||||
} else if i := strings.IndexByte(origin, '*'); i >= 0 {
|
||||
// Split the origin in two: start and end string without the *
|
||||
w := wildcard{origin[0:i], origin[i+1:]}
|
||||
c.allowedWOrigins = append(c.allowedWOrigins, w)
|
||||
} else {
|
||||
c.allowedOrigins = append(c.allowedOrigins, origin)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allowed Headers
|
||||
if len(options.AllowedHeaders) == 0 {
|
||||
// Use sensible defaults
|
||||
c.allowedHeaders = []string{"Origin", "Accept", "Content-Type"}
|
||||
} else {
|
||||
// Origin is always appended as some browsers will always request for this header at preflight
|
||||
c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey)
|
||||
for _, h := range options.AllowedHeaders {
|
||||
if h == "*" {
|
||||
c.allowedHeadersAll = true
|
||||
c.allowedHeaders = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allowed Methods
|
||||
if len(options.AllowedMethods) == 0 {
|
||||
// Default is spec's "simple" methods
|
||||
c.allowedMethods = []string{http.MethodGet, http.MethodPost, http.MethodHead}
|
||||
} else {
|
||||
c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Handler creates a new Cors handler with passed options.
|
||||
func Handler(options Options) func(next http.Handler) http.Handler {
|
||||
c := New(options)
|
||||
return c.Handler
|
||||
}
|
||||
|
||||
// AllowAll create a new Cors handler with permissive configuration allowing all
|
||||
// origins with all standard methods with any header and credentials.
|
||||
func AllowAll() *Cors {
|
||||
return New(Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{
|
||||
http.MethodHead,
|
||||
http.MethodGet,
|
||||
http.MethodPost,
|
||||
http.MethodPut,
|
||||
http.MethodPatch,
|
||||
http.MethodDelete,
|
||||
},
|
||||
AllowedHeaders: []string{"*"},
|
||||
AllowCredentials: false,
|
||||
})
|
||||
}
|
||||
|
||||
// Handler apply the CORS specification on the request, and add relevant CORS headers
// as necessary.
func (c *Cors) Handler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// null or empty Origin header value is acceptable and it is considered having that header
		_, hasOriginHeader := r.Header["Origin"]

		if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" && hasOriginHeader {
			c.logf("Handler: Preflight request")
			c.handlePreflight(w, r)
			// Preflight requests are standalone and should stop the chain as some other
			// middleware may not handle OPTIONS requests correctly. One typical example
			// is authentication middleware; OPTIONS requests won't carry authentication
			// headers (see #1)
			if c.optionPassthrough {
				next.ServeHTTP(w, r)
			} else {
				w.WriteHeader(http.StatusOK)
			}
		} else {
			c.logf("Handler: Actual request")
			c.handleActualRequest(w, r)
			next.ServeHTTP(w, r)
		}
	})
}
|
||||
|
||||
// handlePreflight handles pre-flight CORS requests
|
||||
func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) {
|
||||
headers := w.Header()
|
||||
origin := r.Header.Get("Origin")
|
||||
|
||||
if r.Method != http.MethodOptions {
|
||||
c.logf("Preflight aborted: %s!=OPTIONS", r.Method)
|
||||
return
|
||||
}
|
||||
// Always set Vary headers
|
||||
// see https://github.com/rs/cors/issues/10,
|
||||
// https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001
|
||||
headers.Add("Vary", "Origin")
|
||||
headers.Add("Vary", "Access-Control-Request-Method")
|
||||
headers.Add("Vary", "Access-Control-Request-Headers")
|
||||
|
||||
if !c.isOriginAllowed(r, origin) {
|
||||
c.logf("Preflight aborted: origin '%s' not allowed", origin)
|
||||
return
|
||||
}
|
||||
|
||||
reqMethod := r.Header.Get("Access-Control-Request-Method")
|
||||
if !c.isMethodAllowed(reqMethod) {
|
||||
c.logf("Preflight aborted: method '%s' not allowed", reqMethod)
|
||||
return
|
||||
}
|
||||
reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers"))
|
||||
if !c.areHeadersAllowed(reqHeaders) {
|
||||
c.logf("Preflight aborted: headers '%v' not allowed", reqHeaders)
|
||||
return
|
||||
}
|
||||
if c.allowedOriginsAll {
|
||||
headers.Set("Access-Control-Allow-Origin", "*")
|
||||
} else {
|
||||
headers.Set("Access-Control-Allow-Origin", origin)
|
||||
}
|
||||
// Spec says: Since the list of methods can be unbounded, simply returning the method indicated
|
||||
// by Access-Control-Request-Method (if supported) can be enough
|
||||
headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod))
|
||||
if len(reqHeaders) > 0 {
|
||||
|
||||
// Spec says: Since the list of headers can be unbounded, simply returning supported headers
|
||||
// from Access-Control-Request-Headers can be enough
|
||||
headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", "))
|
||||
}
|
||||
if c.allowCredentials {
|
||||
headers.Set("Access-Control-Allow-Credentials", "true")
|
||||
}
|
||||
if c.maxAge > 0 {
|
||||
headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge))
|
||||
}
|
||||
c.logf("Preflight response headers: %v", headers)
|
||||
}
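
To make the preflight flow above concrete, here is a small editorial sketch (not part of the vendored file) that exercises it through the exported cors API with net/http/httptest:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/go-chi/cors"
)

func main() {
	next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	h := cors.New(cors.Options{
		AllowedOrigins: []string{"https://example.com"},
		AllowedMethods: []string{"GET", "POST"},
	}).Handler(next)

	// A preflight request: OPTIONS + Origin + Access-Control-Request-Method.
	req := httptest.NewRequest(http.MethodOptions, "/resource", nil)
	req.Header.Set("Origin", "https://example.com")
	req.Header.Set("Access-Control-Request-Method", "POST")

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	fmt.Println(rec.Code)                                         // 200
	fmt.Println(rec.Header().Get("Access-Control-Allow-Origin"))  // https://example.com
	fmt.Println(rec.Header().Get("Access-Control-Allow-Methods")) // POST
}
```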
|
||||
|
||||
// handleActualRequest handles simple cross-origin requests, actual request or redirects
|
||||
func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) {
|
||||
headers := w.Header()
|
||||
// null Origin header value is acceptable and it is considered having that header
|
||||
_, hasOriginHeader := r.Header["Origin"]
|
||||
|
||||
// Always set Vary, see https://github.com/rs/cors/issues/10
|
||||
headers.Add("Vary", "Origin")
|
||||
|
||||
if !hasOriginHeader {
|
||||
c.logf("Actual request no headers added: missing origin")
|
||||
return
|
||||
}
|
||||
origin := r.Header.Get("Origin")
|
||||
if !c.isOriginAllowed(r, origin) {
|
||||
c.logf("Actual request no headers added: origin '%s' not allowed", origin)
|
||||
return
|
||||
}
|
||||
|
||||
// Note that spec does define a way to specifically disallow a simple method like GET or
|
||||
// POST. Access-Control-Allow-Methods is only used for pre-flight requests and the
|
||||
// spec doesn't instruct to check the allowed methods for simple cross-origin requests.
|
||||
// We think it's a nice feature to be able to have control on those methods though.
|
||||
if !c.isMethodAllowed(r.Method) {
|
||||
c.logf("Actual request no headers added: method '%s' not allowed", r.Method)
|
||||
|
||||
return
|
||||
}
|
||||
if c.allowedOriginsAll {
|
||||
headers.Set("Access-Control-Allow-Origin", "*")
|
||||
} else {
|
||||
headers.Set("Access-Control-Allow-Origin", origin)
|
||||
}
|
||||
if len(c.exposedHeaders) > 0 {
|
||||
headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", "))
|
||||
}
|
||||
if c.allowCredentials {
|
||||
headers.Set("Access-Control-Allow-Credentials", "true")
|
||||
}
|
||||
c.logf("Actual response added headers: %v", headers)
|
||||
}
|
||||
|
||||
// convenience method. checks if a logger is set.
|
||||
func (c *Cors) logf(format string, a ...interface{}) {
|
||||
if c.Log != nil {
|
||||
c.Log.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests
|
||||
// on the endpoint
|
||||
func (c *Cors) isOriginAllowed(r *http.Request, origin string) bool {
|
||||
if c.allowOriginFunc != nil {
|
||||
return c.allowOriginFunc(r, origin)
|
||||
}
|
||||
if c.allowedOriginsAll {
|
||||
return true
|
||||
}
|
||||
origin = strings.ToLower(origin)
|
||||
for _, o := range c.allowedOrigins {
|
||||
if o == origin {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, w := range c.allowedWOrigins {
|
||||
if w.match(origin) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isMethodAllowed checks if a given method can be used as part of a cross-domain request
|
||||
// on the endpoint
|
||||
func (c *Cors) isMethodAllowed(method string) bool {
|
||||
if len(c.allowedMethods) == 0 {
|
||||
// If no method allowed, always return false, even for preflight request
|
||||
return false
|
||||
}
|
||||
method = strings.ToUpper(method)
|
||||
if method == http.MethodOptions {
|
||||
// Always allow preflight requests
|
||||
return true
|
||||
}
|
||||
for _, m := range c.allowedMethods {
|
||||
if m == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// areHeadersAllowed checks if a given list of headers are allowed to used within
|
||||
// a cross-domain request.
|
||||
func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool {
|
||||
if c.allowedHeadersAll || len(requestedHeaders) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, header := range requestedHeaders {
|
||||
header = http.CanonicalHeaderKey(header)
|
||||
found := false
|
||||
for _, h := range c.allowedHeaders {
|
||||
if h == header {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
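For orientation, here is a minimal sketch of how this vendored middleware is typically mounted in front of a handler; the origins, port, and route below are illustrative assumptions, not code from this repository:

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-chi/cors"
)

func main() {
	// Illustrative configuration; origins, methods and headers are assumptions.
	c := cors.New(cors.Options{
		AllowedOrigins:   []string{"https://example.com"},
		AllowedMethods:   []string{"GET", "POST", "OPTIONS"},
		AllowedHeaders:   []string{"Authorization", "Content-Type"},
		AllowCredentials: true,
		MaxAge:           300, // seconds a preflight response may be cached
	})

	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("pong"))
	})

	// Handler wraps mux so preflight requests are answered before the chain runs,
	// matching the optionPassthrough logic shown above.
	log.Fatal(http.ListenAndServe(":8080", c.Handler(mux)))
}
```

With this wiring, OPTIONS preflight requests are terminated by the middleware unless passthrough is enabled, while actual requests fall through to the wrapped handler with the CORS response headers already set.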
70
vendor/github.com/go-chi/cors/utils.go
generated
vendored
Normal file
70
vendor/github.com/go-chi/cors/utils.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
package cors

import "strings"

const toLower = 'a' - 'A'

type converter func(string) string

type wildcard struct {
	prefix string
	suffix string
}

func (w wildcard) match(s string) bool {
	return len(s) >= len(w.prefix+w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix)
}

// convert converts a list of string using the passed converter function
func convert(s []string, c converter) []string {
	out := []string{}
	for _, i := range s {
		out = append(out, c(i))
	}
	return out
}

// parseHeaderList tokenize + normalize a string containing a list of headers
func parseHeaderList(headerList string) []string {
	l := len(headerList)
	h := make([]byte, 0, l)
	upper := true
	// Estimate the number headers in order to allocate the right splice size
	t := 0
	for i := 0; i < l; i++ {
		if headerList[i] == ',' {
			t++
		}
	}
	headers := make([]string, 0, t)
	for i := 0; i < l; i++ {
		b := headerList[i]
		if b >= 'a' && b <= 'z' {
			if upper {
				h = append(h, b-toLower)
			} else {
				h = append(h, b)
			}
		} else if b >= 'A' && b <= 'Z' {
			if !upper {
				h = append(h, b+toLower)
			} else {
				h = append(h, b)
			}
		} else if b == '-' || b == '_' || b == '.' || (b >= '0' && b <= '9') {
			h = append(h, b)
		}

		if b == ' ' || b == ',' || i == l-1 {
			if len(h) > 0 {
				// Flush the found header
				headers = append(headers, string(h))
				h = h[:0]
				upper = true
			}
		} else {
			upper = b == '-'
		}
	}
	return headers
}
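To illustrate the normalization performed by `parseHeaderList` above, a small in-package test sketch (the test itself is hypothetical and not part of the vendored file):

```go
package cors

import (
	"reflect"
	"testing"
)

// TestParseHeaderListSketch shows how parseHeaderList canonicalizes a
// comma-separated header list; the input values are arbitrary examples.
func TestParseHeaderListSketch(t *testing.T) {
	got := parseHeaderList("x-requested-with, content-type,AUTHORIZATION")
	want := []string{"X-Requested-With", "Content-Type", "Authorization"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}
```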
21
vendor/github.com/go-viper/mapstructure/v2/.editorconfig
generated
vendored
Normal file
21
vendor/github.com/go-viper/mapstructure/v2/.editorconfig
generated
vendored
Normal file
@@ -0,0 +1,21 @@
root = true

[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

[*.go]
indent_style = tab

[{Makefile,*.mk}]
indent_style = tab

[*.nix]
indent_size = 2

[.golangci.yaml]
indent_size = 2
4
vendor/github.com/go-viper/mapstructure/v2/.envrc
generated
vendored
Normal file
4
vendor/github.com/go-viper/mapstructure/v2/.envrc
generated
vendored
Normal file
@@ -0,0 +1,4 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake . --impure
6
vendor/github.com/go-viper/mapstructure/v2/.gitignore
generated
vendored
Normal file
6
vendor/github.com/go-viper/mapstructure/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,6 @@
/.devenv/
/.direnv/
/.pre-commit-config.yaml
/bin/
/build/
/var/
48
vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
generated
vendored
Normal file
48
vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
generated
vendored
Normal file
@@ -0,0 +1,48 @@
version: "2"

run:
  timeout: 10m

linters:
  enable:
    - govet
    - ineffassign
    # - misspell
    - nolintlint
    # - revive

  disable:
    - errcheck
    - staticcheck
    - unused

  settings:
    misspell:
      locale: US
    nolintlint:
      allow-unused: false # report any unused nolint directives
      require-specific: false # don't require nolint directives to be specific about which linter is being skipped

formatters:
  enable:
    - gci
    - gofmt
    - gofumpt
    - goimports
    # - golines

  settings:
    gci:
      sections:
        - standard
        - default
        - localmodule
    gofmt:
      simplify: true
      rewrite-rules:
        - pattern: interface{}
          replacement: any

  exclusions:
    paths:
      - internal/
104
vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
generated
vendored
Normal file
104
vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,104 @@
> [!WARNING]
> As of v2 of this library, change log can be found in GitHub releases.

## 1.5.1

* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
* Fix map of slices not decoding properly in certain cases. [GH-266]

## 1.5.0

* New option `IgnoreUntaggedFields` to ignore decoding to any fields
  without `mapstructure` (or the configured tag name) set [GH-277]
* New option `ErrorUnset` which makes it an error if any fields
  in a target struct are not set by the decoding process. [GH-225]
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
* Decoding to slice from array no longer crashes [GH-265]
* Decode nested struct pointers to map [GH-271]
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
* Fix issue where fields with `,omitempty` would sometimes decode
  into a map with an empty string key [GH-281]

## 1.4.3

* Fix cases where `json.Number` didn't decode properly [GH-261]

## 1.4.2

* Custom name matchers to support any sort of casing, formatting, etc. for
  field names. [GH-250]
* Fix possible panic in ComposeDecodeHookFunc [GH-251]

## 1.4.1

* Fix regression where `*time.Time` value would be set to empty and not be sent
  to decode hooks properly [GH-232]

## 1.4.0

* A new decode hook type `DecodeHookFuncValue` has been added that has
  access to the full values. [GH-183]
* Squash is now supported with embedded fields that are struct pointers [GH-205]
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]

## 1.3.3

* Decoding maps from maps creates a settable value for decode hooks [GH-203]

## 1.3.2

* Decode into interface type with a struct value is supported [GH-187]

## 1.3.1

* Squash should only squash embedded structs. [GH-194]

## 1.3.0

* Added `",omitempty"` support. This will ignore zero values in the source
  structure when encoding. [GH-145]

## 1.2.3

* Fix duplicate entries in Keys list with pointer values. [GH-185]

## 1.2.2

* Do not add unsettable (unexported) values to the unused metadata key
  or "remain" value. [GH-150]

## 1.2.1

* Go modules checksum mismatch fix

## 1.2.0

* Added support to capture unused values in a field using the `",remain"` value
  in the mapstructure tag. There is an example to showcase usage.
* Added `DecoderConfig` option to always squash embedded structs
* `json.Number` can decode into `uint` types
* Empty slices are preserved and not replaced with nil slices
* Fix panic that can occur in when decoding a map into a nil slice of structs
* Improved package documentation for godoc

## 1.1.2

* Fix error when decode hook decodes interface implementation into interface
  type. [GH-140]

## 1.1.1

* Fix panic that can happen in `decodePtr`

## 1.1.0

* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
* Support struct to struct decoding [GH-137]
* If source map value is nil, then destination map value is nil (instead of empty)
* If source slice value is nil, then destination slice value is nil (instead of empty)
* If source pointer is nil, then destination pointer is set to nil (instead of
  allocated zero value of type)

## 1.0.0

* Initial tagged stable release.
21
vendor/github.com/go-viper/mapstructure/v2/LICENSE
generated
vendored
Normal file
21
vendor/github.com/go-viper/mapstructure/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 Mitchell Hashimoto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
81
vendor/github.com/go-viper/mapstructure/v2/README.md
generated
vendored
Normal file
81
vendor/github.com/go-viper/mapstructure/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,81 @@
# mapstructure

[CI](https://github.com/go-viper/mapstructure/actions/workflows/ci.yaml)
[Go Reference](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
[Dependencies](https://deps.dev/go/github.com%252Fgo-viper%252Fmapstructure%252Fv2)

mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.

This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.

## Installation

```shell
go get github.com/go-viper/mapstructure/v2
```

## Migrating from `github.com/mitchellh/mapstructure`

[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository achieved the "blessed fork" status.

You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
The API is the same, so you don't need to change anything else.

Here is a script that can help you with the migration:

```shell
sed -i 's|github.com/mitchellh/mapstructure|github.com/go-viper/mapstructure/v2|g' $(find . -type f -name '*.go')
```

If you need more time to migrate your code, that is absolutely fine.

Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:

```shell
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
```

## Usage & Example

For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).

The `Decode` function has examples associated with it there.

## But Why?!

Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:

```json
{
  "type": "person",
  "name": "Mitchell"
}
```

Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.

## Credits

Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
This is a maintained fork of the original library.

Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).

## License

The project is licensed under the [MIT License](LICENSE).
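To make the dispatch-on-"type" pattern from the README's "But Why?!" section concrete, a minimal sketch using `mapstructure.Decode` (struct and variable names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	// Typically this map would come from json.Unmarshal into map[string]any.
	input := map[string]any{
		"type": "person",
		"name": "Mitchell",
	}

	// Read the "type" key first, then decode the rest into the right struct.
	if input["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(input, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```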
714
vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
generated
vendored
Normal file
714
vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
generated
vendored
Normal file
@@ -0,0 +1,714 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// typedDecodeHook takes a raw DecodeHookFunc (an any) and turns
|
||||
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
var f3 DecodeHookFuncValue
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []any{f1, f2, f3}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
for _, raw := range potential {
|
||||
pt := reflect.ValueOf(raw).Type()
|
||||
if vt.ConvertibleTo(pt) {
|
||||
return v.Convert(pt).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cachedDecodeHook takes a raw DecodeHookFunc (an any) and turns
|
||||
// it into a closure to be used directly
|
||||
// if the type fails to convert we return a closure always erroring to keep the previous behaviour
|
||||
func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (any, error) {
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||
return f(from.Type(), to.Type(), from.Interface())
|
||||
}
|
||||
case DecodeHookFuncKind:
|
||||
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||
return f(from.Kind(), to.Kind(), from.Interface())
|
||||
}
|
||||
case DecodeHookFuncValue:
|
||||
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||
return f(from, to)
|
||||
}
|
||||
default:
|
||||
return func(from reflect.Value, to reflect.Value) (any, error) {
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DecodeHookExec executes the given decode hook. This should be used
|
||||
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Value, to reflect.Value,
|
||||
) (any, error) {
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from.Type(), to.Type(), from.Interface())
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), from.Interface())
|
||||
case DecodeHookFuncValue:
|
||||
return f(from, to)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
|
||||
|
||||
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||
// automatically composes multiple DecodeHookFuncs.
|
||||
//
|
||||
// The composed funcs are called in order, with the result of the
|
||||
// previous transformation.
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(fs))
|
||||
for _, f := range fs {
|
||||
cached = append(cached, cachedDecodeHook(f))
|
||||
}
|
||||
return func(f reflect.Value, t reflect.Value) (any, error) {
|
||||
var err error
|
||||
data := f.Interface()
|
||||
|
||||
newFrom := f
|
||||
for _, c := range cached {
|
||||
data, err = c(newFrom, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if v, ok := data.(reflect.Value); ok {
|
||||
newFrom = v
|
||||
} else {
|
||||
newFrom = reflect.ValueOf(data)
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
|
||||
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
|
||||
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
|
||||
cached := make([]func(from reflect.Value, to reflect.Value) (any, error), 0, len(ff))
|
||||
for _, f := range ff {
|
||||
cached = append(cached, cachedDecodeHook(f))
|
||||
}
|
||||
return func(a, b reflect.Value) (any, error) {
|
||||
var allErrs string
|
||||
var out any
|
||||
var err error
|
||||
|
||||
for _, c := range cached {
|
||||
out, err = c(a, b)
|
||||
if err != nil {
|
||||
allErrs += err.Error() + "\n"
|
||||
continue
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
return nil, errors.New(allErrs)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToSliceHookFunc returns a DecodeHookFunc that converts
|
||||
// string to []string by splitting on the given sep.
|
||||
func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.SliceOf(f) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
raw := data.(string)
|
||||
if raw == "" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(raw, sep), nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToWeakSliceHookFunc brings back the old (pre-v2) behavior of [StringToSliceHookFunc].
|
||||
//
|
||||
// As of mapstructure v2.0.0 [StringToSliceHookFunc] checks if the return type is a string slice.
|
||||
// This function removes that check.
|
||||
func StringToWeakSliceHookFunc(sep string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Slice {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
raw := data.(string)
|
||||
if raw == "" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(raw, sep), nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Duration.
|
||||
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Duration(5)) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
d, err := time.ParseDuration(data.(string))
|
||||
|
||||
return d, wrapTimeParseDurationError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeLocationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to *time.Location.
|
||||
func StringToTimeLocationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Local) {
|
||||
return data, nil
|
||||
}
|
||||
d, err := time.LoadLocation(data.(string))
|
||||
|
||||
return d, wrapTimeParseLocationError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToURLHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to *url.URL.
|
||||
func StringToURLHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(&url.URL{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
u, err := url.Parse(data.(string))
|
||||
|
||||
return u, wrapUrlError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIPHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to net.IP
|
||||
func StringToIPHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(net.IP{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
ip := net.ParseIP(data.(string))
|
||||
if ip == nil {
|
||||
return net.IP{}, fmt.Errorf("failed parsing ip")
|
||||
}
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to net.IPNet
|
||||
func StringToIPNetHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(net.IPNet{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
_, net, err := net.ParseCIDR(data.(string))
|
||||
return net, wrapNetParseError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Time.
|
||||
func StringToTimeHookFunc(layout string) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Time{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
ti, err := time.Parse(layout, data.(string))
|
||||
|
||||
return ti, wrapTimeParseError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
|
||||
// the decoder.
|
||||
//
|
||||
// Note that this is significantly different from the WeaklyTypedInput option
|
||||
// of the DecoderConfig.
|
||||
func WeaklyTypedHook(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
data any,
|
||||
) (any, error) {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
switch t {
|
||||
case reflect.String:
|
||||
switch f {
|
||||
case reflect.Bool:
|
||||
if dataVal.Bool() {
|
||||
return "1", nil
|
||||
}
|
||||
return "0", nil
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
|
||||
case reflect.Int:
|
||||
return strconv.FormatInt(dataVal.Int(), 10), nil
|
||||
case reflect.Slice:
|
||||
dataType := dataVal.Type()
|
||||
elemKind := dataType.Elem().Kind()
|
||||
if elemKind == reflect.Uint8 {
|
||||
return string(dataVal.Interface().([]uint8)), nil
|
||||
}
|
||||
case reflect.Uint:
|
||||
return strconv.FormatUint(dataVal.Uint(), 10), nil
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func RecursiveStructToMapHookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Value, t reflect.Value) (any, error) {
|
||||
if f.Kind() != reflect.Struct {
|
||||
return f.Interface(), nil
|
||||
}
|
||||
|
||||
var i any = struct{}{}
|
||||
if t.Type() != reflect.TypeOf(&i).Elem() {
|
||||
return f.Interface(), nil
|
||||
}
|
||||
|
||||
m := make(map[string]any)
|
||||
t.Set(reflect.ValueOf(m))
|
||||
|
||||
return f.Interface(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
|
||||
// strings to the UnmarshalText function, when the target type
|
||||
// implements the encoding.TextUnmarshaler interface
|
||||
func TextUnmarshallerHookFunc() DecodeHookFuncType {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
result := reflect.New(t).Interface()
|
||||
unmarshaller, ok := result.(encoding.TextUnmarshaler)
|
||||
if !ok {
|
||||
return data, nil
|
||||
}
|
||||
str, ok := data.(string)
|
||||
if !ok {
|
||||
str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
|
||||
}
|
||||
if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to netip.Addr.
|
||||
func StringToNetIPAddrHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(netip.Addr{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
addr, err := netip.ParseAddr(data.(string))
|
||||
|
||||
return addr, wrapNetIPParseAddrError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to netip.AddrPort.
|
||||
func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(netip.AddrPort{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
addrPort, err := netip.ParseAddrPort(data.(string))
|
||||
|
||||
return addrPort, wrapNetIPParseAddrPortError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToNetIPPrefixHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to netip.Prefix.
|
||||
func StringToNetIPPrefixHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data any,
|
||||
) (any, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(netip.Prefix{}) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
prefix, err := netip.ParsePrefix(data.(string))
|
||||
|
||||
return prefix, wrapNetIPParsePrefixError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to basic types.
|
||||
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
|
||||
func StringToBasicTypeHookFunc() DecodeHookFunc {
|
||||
return ComposeDecodeHookFunc(
|
||||
StringToInt8HookFunc(),
|
||||
StringToUint8HookFunc(),
|
||||
StringToInt16HookFunc(),
|
||||
StringToUint16HookFunc(),
|
||||
StringToInt32HookFunc(),
|
||||
StringToUint32HookFunc(),
|
||||
StringToInt64HookFunc(),
|
||||
StringToUint64HookFunc(),
|
||||
StringToIntHookFunc(),
|
||||
StringToUintHookFunc(),
|
||||
StringToFloat32HookFunc(),
|
||||
StringToFloat64HookFunc(),
|
||||
StringToBoolHookFunc(),
|
||||
// byte and rune are aliases for uint8 and int32 respectively
|
||||
// StringToByteHookFunc(),
|
||||
// StringToRuneHookFunc(),
|
||||
StringToComplex64HookFunc(),
|
||||
StringToComplex128HookFunc(),
|
||||
)
|
||||
}
|
||||
|
||||
// StringToInt8HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to int8.
|
||||
func StringToInt8HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
i64, err := strconv.ParseInt(data.(string), 0, 8)
|
||||
return int8(i64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToUint8HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to uint8.
|
||||
func StringToUint8HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
u64, err := strconv.ParseUint(data.(string), 0, 8)
|
||||
return uint8(u64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToInt16HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to int16.
|
||||
func StringToInt16HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
i64, err := strconv.ParseInt(data.(string), 0, 16)
|
||||
return int16(i64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToUint16HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to uint16.
|
||||
func StringToUint16HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
u64, err := strconv.ParseUint(data.(string), 0, 16)
|
||||
return uint16(u64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToInt32HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to int32.
|
||||
func StringToInt32HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
i64, err := strconv.ParseInt(data.(string), 0, 32)
|
||||
return int32(i64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToUint32HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to uint32.
|
||||
func StringToUint32HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
u64, err := strconv.ParseUint(data.(string), 0, 32)
|
||||
return uint32(u64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToInt64HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to int64.
|
||||
func StringToInt64HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
i64, err := strconv.ParseInt(data.(string), 0, 64)
|
||||
return int64(i64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToUint64HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to uint64.
|
||||
func StringToUint64HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
u64, err := strconv.ParseUint(data.(string), 0, 64)
|
||||
return uint64(u64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToIntHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to int.
|
||||
func StringToIntHookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Int {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
i64, err := strconv.ParseInt(data.(string), 0, 0)
|
||||
return int(i64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToUintHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to uint.
|
||||
func StringToUintHookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
u64, err := strconv.ParseUint(data.(string), 0, 0)
|
||||
return uint(u64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToFloat32HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to float32.
|
||||
func StringToFloat32HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
f64, err := strconv.ParseFloat(data.(string), 32)
|
||||
return float32(f64), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToFloat64HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to float64.
|
||||
func StringToFloat64HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
f64, err := strconv.ParseFloat(data.(string), 64)
|
||||
return f64, wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToBoolHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to bool.
|
||||
func StringToBoolHookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
b, err := strconv.ParseBool(data.(string))
|
||||
return b, wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToByteHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to byte.
|
||||
func StringToByteHookFunc() DecodeHookFunc {
|
||||
return StringToUint8HookFunc()
|
||||
}
|
||||
|
||||
// StringToRuneHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to rune.
|
||||
func StringToRuneHookFunc() DecodeHookFunc {
|
||||
return StringToInt32HookFunc()
|
||||
}
|
||||
|
||||
// StringToComplex64HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to complex64.
|
||||
func StringToComplex64HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
c128, err := strconv.ParseComplex(data.(string), 64)
|
||||
return complex64(c128), wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StringToComplex128HookFunc returns a DecodeHookFunc that converts
|
||||
// strings to complex128.
|
||||
func StringToComplex128HookFunc() DecodeHookFunc {
|
||||
return func(f reflect.Type, t reflect.Type, data any) (any, error) {
|
||||
if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
c128, err := strconv.ParseComplex(data.(string), 128)
|
||||
return c128, wrapStrconvNumError(err)
|
||||
}
|
||||
}
|
||||
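The hook constructors in `decode_hooks.go` above are normally wired up through a `DecoderConfig`; a minimal sketch of that composition (the target struct and input values are assumptions):

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

type Config struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	input := map[string]any{
		"timeout": "2s",
		"tags":    "a,b,c",
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &cfg,
		// Hooks run in order; each one only converts values it recognizes.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout, cfg.Tags) // 2s [a b c]
}
```

Composing the hooks this way lets a single decoder handle both the duration string and the comma-separated list without any manual conversion in the caller.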
244
vendor/github.com/go-viper/mapstructure/v2/errors.go
generated
vendored
Normal file
244
vendor/github.com/go-viper/mapstructure/v2/errors.go
generated
vendored
Normal file
@@ -0,0 +1,244 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Error interface is implemented by all errors emitted by mapstructure.
|
||||
//
|
||||
// Use [errors.As] to check if an error implements this interface.
|
||||
type Error interface {
|
||||
error
|
||||
|
||||
mapstructure()
|
||||
}
|
||||
|
||||
// DecodeError is a generic error type that holds information about
|
||||
// a decoding error together with the name of the field that caused the error.
|
||||
type DecodeError struct {
|
||||
name string
|
||||
err error
|
||||
}
|
||||
|
||||
func newDecodeError(name string, err error) *DecodeError {
|
||||
return &DecodeError{
|
||||
name: name,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *DecodeError) Name() string {
|
||||
return e.name
|
||||
}
|
||||
|
||||
func (e *DecodeError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
func (e *DecodeError) Error() string {
|
||||
return fmt.Sprintf("'%s' %s", e.name, e.err)
|
||||
}
|
||||
|
||||
func (*DecodeError) mapstructure() {}
|
||||
|
||||
// ParseError is an error type that indicates a value could not be parsed
|
||||
// into the expected type.
|
||||
type ParseError struct {
|
||||
Expected reflect.Value
|
||||
Value any
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() string {
|
||||
return fmt.Sprintf("cannot parse value as '%s': %s", e.Expected.Type(), e.Err)
|
||||
}
|
||||
|
||||
func (*ParseError) mapstructure() {}
|
||||
|
||||
// UnconvertibleTypeError is an error type that indicates a value could not be
|
||||
// converted to the expected type.
|
||||
type UnconvertibleTypeError struct {
|
||||
Expected reflect.Value
|
||||
Value any
|
||||
}
|
||||
|
||||
func (e *UnconvertibleTypeError) Error() string {
|
||||
return fmt.Sprintf(
|
||||
"expected type '%s', got unconvertible type '%s'",
|
||||
e.Expected.Type(),
|
||||
reflect.TypeOf(e.Value),
|
||||
)
|
||||
}
|
||||
|
||||
func (*UnconvertibleTypeError) mapstructure() {}
|
||||
|
||||
func wrapStrconvNumError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err, ok := err.(*strconv.NumError); ok {
|
||||
return &strconvNumError{Err: err}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type strconvNumError struct {
|
||||
Err *strconv.NumError
|
||||
}
|
||||
|
||||
func (e *strconvNumError) Error() string {
|
||||
return "strconv." + e.Err.Func + ": " + e.Err.Err.Error()
|
||||
}
|
||||
|
||||
func (e *strconvNumError) Unwrap() error { return e.Err }
|
||||
|
||||
func wrapUrlError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err, ok := err.(*url.Error); ok {
|
||||
return &urlError{Err: err}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type urlError struct {
|
||||
Err *url.Error
|
||||
}
|
||||
|
||||
func (e *urlError) Error() string {
|
||||
return fmt.Sprintf("%s", e.Err.Err)
|
||||
}
|
||||
|
||||
func (e *urlError) Unwrap() error { return e.Err }
|
||||
|
||||
func wrapNetParseError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err, ok := err.(*net.ParseError); ok {
|
||||
return &netParseError{Err: err}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type netParseError struct {
|
||||
Err *net.ParseError
|
||||
}
|
||||
|
||||
func (e *netParseError) Error() string {
|
||||
return "invalid " + e.Err.Type
|
||||
}
|
||||
|
||||
func (e *netParseError) Unwrap() error { return e.Err }
|
||||
|
||||
func wrapTimeParseError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err, ok := err.(*time.ParseError); ok {
|
||||
return &timeParseError{Err: err}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type timeParseError struct {
|
||||
Err *time.ParseError
|
||||
}
|
||||
|
||||
func (e *timeParseError) Error() string {
|
||||
if e.Err.Message == "" {
|
||||
return fmt.Sprintf("parsing time as %q: cannot parse as %q", e.Err.Layout, e.Err.LayoutElem)
|
||||
}
|
||||
|
||||
return "parsing time " + e.Err.Message
|
||||
}
|
||||
|
||||
func (e *timeParseError) Unwrap() error { return e.Err }
|
||||
|
||||
func wrapNetIPParseAddrError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if errMsg := err.Error(); strings.HasPrefix(errMsg, "ParseAddr") {
|
||||
errPieces := strings.Split(errMsg, ": ")
|
||||
|
||||
return fmt.Errorf("ParseAddr: %s", errPieces[len(errPieces)-1])
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func wrapNetIPParseAddrPortError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
errMsg := err.Error()
|
||||
if strings.HasPrefix(errMsg, "invalid port ") {
|
||||
return errors.New("invalid port")
|
||||
} else if strings.HasPrefix(errMsg, "invalid ip:port ") {
|
||||
return errors.New("invalid ip:port")
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func wrapNetIPParsePrefixError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if errMsg := err.Error(); strings.HasPrefix(errMsg, "netip.ParsePrefix") {
|
||||
errPieces := strings.Split(errMsg, ": ")
|
||||
|
||||
return fmt.Errorf("netip.ParsePrefix: %s", errPieces[len(errPieces)-1])
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func wrapTimeParseDurationError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
errMsg := err.Error()
|
||||
if strings.HasPrefix(errMsg, "time: unknown unit ") {
|
||||
return errors.New("time: unknown unit")
|
||||
} else if strings.HasPrefix(errMsg, "time: ") {
|
||||
idx := strings.LastIndex(errMsg, " ")
|
||||
|
||||
return errors.New(errMsg[:idx])
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func wrapTimeParseLocationError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
errMsg := err.Error()
|
||||
if strings.Contains(errMsg, "unknown time zone") || strings.HasPrefix(errMsg, "time: unknown format") {
|
||||
return fmt.Errorf("invalid time zone format: %w", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
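The wrapped error types in `errors.go` above are designed to be inspected with `errors.As`; a small sketch of that pattern (whether a particular failure surfaces as a `*DecodeError` depends on the decode path, so treat the output as illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

func main() {
	var out struct {
		Port int `mapstructure:"port"`
	}

	err := mapstructure.Decode(map[string]any{"port": "not-a-number"}, &out)

	// DecodeError carries the name of the field that failed to decode.
	var decErr *mapstructure.DecodeError
	if errors.As(err, &decErr) {
		fmt.Println("field:", decErr.Name(), "reason:", decErr.Unwrap())
	}
}
```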
294
vendor/github.com/go-viper/mapstructure/v2/flake.lock
generated
vendored
Normal file
294
vendor/github.com/go-viper/mapstructure/v2/flake.lock
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
||||
{
|
||||
"nodes": {
|
||||
"cachix": {
|
||||
"inputs": {
|
||||
"devenv": [
|
||||
"devenv"
|
||||
],
|
||||
"flake-compat": [
|
||||
"devenv"
|
||||
],
|
||||
"git-hooks": [
|
||||
"devenv"
|
||||
],
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1742042642,
|
||||
"narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=",
|
||||
"owner": "cachix",
|
||||
"repo": "cachix",
|
||||
"rev": "a624d3eaf4b1d225f918de8543ed739f2f574203",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "latest",
|
||||
"repo": "cachix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"devenv": {
|
||||
"inputs": {
|
||||
"cachix": "cachix",
|
||||
"flake-compat": "flake-compat",
|
||||
"git-hooks": "git-hooks",
|
||||
"nix": "nix",
|
||||
"nixpkgs": "nixpkgs_3"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1744876578,
|
||||
"narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1733328505,
|
||||
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": [
|
||||
"devenv",
|
||||
"nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1712014858,
|
||||
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts_2": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1743550720,
|
||||
"narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "c621e8422220273271f52058f618c94e405bb0f5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"git-hooks": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv"
|
||||
],
|
||||
"gitignore": "gitignore",
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1742649964,
|
||||
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"git-hooks",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709087332,
|
||||
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"libgit2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1697646580,
|
||||
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
|
||||
"owner": "libgit2",
|
||||
"repo": "libgit2",
|
||||
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "libgit2",
|
||||
"repo": "libgit2",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv"
|
||||
],
|
||||
"flake-parts": "flake-parts",
|
||||
"libgit2": "libgit2",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"nixpkgs-23-11": [
|
||||
"devenv"
|
||||
],
|
||||
"nixpkgs-regression": [
|
||||
"devenv"
|
||||
],
|
||||
"pre-commit-hooks": [
|
||||
"devenv"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1741798497,
|
||||
"narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=",
|
||||
"owner": "domenkozar",
|
||||
"repo": "nix",
|
||||
"rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "domenkozar",
|
||||
"ref": "devenv-2.24",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1733212471,
|
||||
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1743296961,
|
||||
"narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1717432640,
|
||||
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "release-24.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_3": {
|
||||
"locked": {
|
||||
"lastModified": 1733477122,
|
||||
"narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv-nixpkgs",
|
||||
"rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "rolling",
|
||||
"repo": "devenv-nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_4": {
|
||||
"locked": {
|
||||
"lastModified": 1744536153,
|
||||
"narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"devenv": "devenv",
|
||||
"flake-parts": "flake-parts_2",
|
||||
"nixpkgs": "nixpkgs_4"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
46
vendor/github.com/go-viper/mapstructure/v2/flake.nix
generated
vendored
Normal file
46
vendor/github.com/go-viper/mapstructure/v2/flake.nix
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||
devenv.url = "github:cachix/devenv";
|
||||
};
|
||||
|
||||
outputs =
|
||||
inputs@{ flake-parts, ... }:
|
||||
flake-parts.lib.mkFlake { inherit inputs; } {
|
||||
imports = [
|
||||
inputs.devenv.flakeModule
|
||||
];
|
||||
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
"x86_64-darwin"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
|
||||
perSystem =
|
||||
{ pkgs, ... }:
|
||||
rec {
|
||||
devenv.shells = {
|
||||
default = {
|
||||
languages = {
|
||||
go.enable = true;
|
||||
};
|
||||
|
||||
pre-commit.hooks = {
|
||||
nixpkgs-fmt.enable = true;
|
||||
};
|
||||
|
||||
packages = with pkgs; [
|
||||
golangci-lint
|
||||
];
|
||||
|
||||
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
|
||||
containers = pkgs.lib.mkForce { };
|
||||
};
|
||||
|
||||
ci = devenv.shells.default;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
11
vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
generated
vendored
Normal file
11
vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
package errors

import "errors"

func New(text string) error {
	return errors.New(text)
}

func As(err error, target interface{}) bool {
	return errors.As(err, target)
}
9
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
generated
vendored
Normal file
9
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
//go:build go1.20

package errors

import "errors"

func Join(errs ...error) error {
	return errors.Join(errs...)
}
61
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
generated
vendored
Normal file
61
vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
//go:build !go1.20
|
||||
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package errors
|
||||
|
||||
// Join returns an error that wraps the given errors.
|
||||
// Any nil error values are discarded.
|
||||
// Join returns nil if every value in errs is nil.
|
||||
// The error formats as the concatenation of the strings obtained
|
||||
// by calling the Error method of each element of errs, with a newline
|
||||
// between each string.
|
||||
//
|
||||
// A non-nil error returned by Join implements the Unwrap() []error method.
|
||||
func Join(errs ...error) error {
|
||||
n := 0
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
n++
|
||||
}
|
||||
}
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
e := &joinError{
|
||||
errs: make([]error, 0, n),
|
||||
}
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
type joinError struct {
|
||||
errs []error
|
||||
}
|
||||
|
||||
func (e *joinError) Error() string {
|
||||
// Since Join returns nil if every value in errs is nil,
|
||||
// e.errs cannot be empty.
|
||||
if len(e.errs) == 1 {
|
||||
return e.errs[0].Error()
|
||||
}
|
||||
|
||||
b := []byte(e.errs[0].Error())
|
||||
for _, err := range e.errs[1:] {
|
||||
b = append(b, '\n')
|
||||
b = append(b, err.Error()...)
|
||||
}
|
||||
// At this point, b has at least one byte '\n'.
|
||||
// return unsafe.String(&b[0], len(b))
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (e *joinError) Unwrap() []error {
|
||||
return e.errs
|
||||
}
|
||||
1712
vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
generated
vendored
Normal file
1712
vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
44
vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
generated
vendored
Normal file
44
vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
//go:build !go1.20
|
||||
|
||||
package mapstructure
|
||||
|
||||
import "reflect"
|
||||
|
||||
func isComparable(v reflect.Value) bool {
|
||||
k := v.Kind()
|
||||
switch k {
|
||||
case reflect.Invalid:
|
||||
return false
|
||||
|
||||
case reflect.Array:
|
||||
switch v.Type().Elem().Kind() {
|
||||
case reflect.Interface, reflect.Array, reflect.Struct:
|
||||
for i := 0; i < v.Type().Len(); i++ {
|
||||
// if !v.Index(i).Comparable() {
|
||||
if !isComparable(v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return v.Type().Comparable()
|
||||
|
||||
case reflect.Interface:
|
||||
// return v.Elem().Comparable()
|
||||
return isComparable(v.Elem())
|
||||
|
||||
case reflect.Struct:
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
return false
|
||||
|
||||
// if !v.Field(i).Comparable() {
|
||||
if !isComparable(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
default:
|
||||
return v.Type().Comparable()
|
||||
}
|
||||
}
|
||||
10
vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
generated
vendored
Normal file
10
vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build go1.20
|
||||
|
||||
package mapstructure
|
||||
|
||||
import "reflect"
|
||||
|
||||
// TODO: remove once we drop support for Go <1.20
|
||||
func isComparable(v reflect.Value) bool {
|
||||
return v.Comparable()
|
||||
}
|
||||
201
vendor/github.com/inconshreveable/mousetrap/LICENSE
generated
vendored
Normal file
201
vendor/github.com/inconshreveable/mousetrap/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2022 Alan Shreve (@inconshreveable)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
23
vendor/github.com/inconshreveable/mousetrap/README.md
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# mousetrap
|
||||
|
||||
mousetrap is a tiny library that answers a single question.
|
||||
|
||||
On a Windows machine, was the process invoked by someone double clicking on
|
||||
the executable file while browsing in explorer?
|
||||
|
||||
### Motivation
|
||||
|
||||
Windows developers unfamiliar with command line tools will often "double-click"
|
||||
the executable for a tool. Because most CLI tools print the help and then exit
|
||||
when invoked without arguments, this is often very frustrating for those users.
|
||||
|
||||
mousetrap provides a way to detect these invocations so that you can provide
|
||||
more helpful behavior and instructions on how to run the CLI tool. To see what
|
||||
this looks like, both from an organizational and a technical perspective, see
|
||||
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
|
||||
|
||||
### The interface
|
||||
|
||||
The library exposes a single interface:
|
||||
|
||||
func StartedByExplorer() (bool)
|
||||
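To make the behavior concrete, here is a minimal, illustrative sketch (not part of the vendored README) of how a CLI might use that interface; the message text and the five-second pause are arbitrary choices, not part of the library:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// Double-clicked from Explorer: explain how to run the tool and keep
	// the console window open long enough for the message to be read.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		os.Exit(1)
	}
	fmt.Println("started from a terminal, continuing normally")
}
```
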
16
vendor/github.com/inconshreveable/mousetrap/trap_others.go
generated
vendored
Normal file
16
vendor/github.com/inconshreveable/mousetrap/trap_others.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package mousetrap
|
||||
|
||||
// StartedByExplorer returns true if the program was invoked by the user
|
||||
// double-clicking on the executable from explorer.exe
|
||||
//
|
||||
// It is conservative and returns false if any of the internal calls fail.
|
||||
// It does not guarantee that the program was run from a terminal. It only can tell you
|
||||
// whether it was launched from explorer.exe
|
||||
//
|
||||
// On non-Windows platforms, it always returns false.
|
||||
func StartedByExplorer() bool {
|
||||
return false
|
||||
}
|
||||
42
vendor/github.com/inconshreveable/mousetrap/trap_windows.go
generated
vendored
Normal file
42
vendor/github.com/inconshreveable/mousetrap/trap_windows.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package mousetrap
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
|
||||
snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CloseHandle(snapshot)
|
||||
var procEntry syscall.ProcessEntry32
|
||||
procEntry.Size = uint32(unsafe.Sizeof(procEntry))
|
||||
if err = syscall.Process32First(snapshot, &procEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for {
|
||||
if procEntry.ProcessID == uint32(pid) {
|
||||
return &procEntry, nil
|
||||
}
|
||||
err = syscall.Process32Next(snapshot, &procEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StartedByExplorer returns true if the program was invoked by the user double-clicking
|
||||
// on the executable from explorer.exe
|
||||
//
|
||||
// It is conservative and returns false if any of the internal calls fail.
|
||||
// It does not guarantee that the program was run from a terminal. It only can tell you
|
||||
// whether it was launched from explorer.exe
|
||||
func StartedByExplorer() bool {
|
||||
pe, err := getProcessEntry(syscall.Getppid())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
|
||||
}
|
||||
2
vendor/github.com/pelletier/go-toml/v2/.dockerignore
generated
vendored
Normal file
2
vendor/github.com/pelletier/go-toml/v2/.dockerignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
cmd/tomll/tomll
|
||||
cmd/tomljson/tomljson
|
||||
4
vendor/github.com/pelletier/go-toml/v2/.gitattributes
generated
vendored
Normal file
4
vendor/github.com/pelletier/go-toml/v2/.gitattributes
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
* text=auto
|
||||
|
||||
benchmark/benchmark.toml text eol=lf
|
||||
testdata/** text eol=lf
|
||||
7
vendor/github.com/pelletier/go-toml/v2/.gitignore
generated
vendored
Normal file
7
vendor/github.com/pelletier/go-toml/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
test_program/test_program_bin
|
||||
fuzz/
|
||||
cmd/tomll/tomll
|
||||
cmd/tomljson/tomljson
|
||||
cmd/tomltestgen/tomltestgen
|
||||
dist
|
||||
tests/
|
||||
84
vendor/github.com/pelletier/go-toml/v2/.golangci.toml
generated
vendored
Normal file
84
vendor/github.com/pelletier/go-toml/v2/.golangci.toml
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
[service]
|
||||
golangci-lint-version = "1.39.0"
|
||||
|
||||
[linters-settings.wsl]
|
||||
allow-assign-and-anything = true
|
||||
|
||||
[linters-settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters]
|
||||
disable-all = true
|
||||
enable = [
|
||||
"asciicheck",
|
||||
"bodyclose",
|
||||
"cyclop",
|
||||
"deadcode",
|
||||
"depguard",
|
||||
"dogsled",
|
||||
"dupl",
|
||||
"durationcheck",
|
||||
"errcheck",
|
||||
"errorlint",
|
||||
"exhaustive",
|
||||
# "exhaustivestruct",
|
||||
"exportloopref",
|
||||
"forbidigo",
|
||||
# "forcetypeassert",
|
||||
"funlen",
|
||||
"gci",
|
||||
# "gochecknoglobals",
|
||||
"gochecknoinits",
|
||||
"gocognit",
|
||||
"goconst",
|
||||
"gocritic",
|
||||
"gocyclo",
|
||||
"godot",
|
||||
"godox",
|
||||
# "goerr113",
|
||||
"gofmt",
|
||||
"gofumpt",
|
||||
"goheader",
|
||||
"goimports",
|
||||
"golint",
|
||||
"gomnd",
|
||||
# "gomoddirectives",
|
||||
"gomodguard",
|
||||
"goprintffuncname",
|
||||
"gosec",
|
||||
"gosimple",
|
||||
"govet",
|
||||
# "ifshort",
|
||||
"importas",
|
||||
"ineffassign",
|
||||
"lll",
|
||||
"makezero",
|
||||
"misspell",
|
||||
"nakedret",
|
||||
"nestif",
|
||||
"nilerr",
|
||||
# "nlreturn",
|
||||
"noctx",
|
||||
"nolintlint",
|
||||
#"paralleltest",
|
||||
"prealloc",
|
||||
"predeclared",
|
||||
"revive",
|
||||
"rowserrcheck",
|
||||
"sqlclosecheck",
|
||||
"staticcheck",
|
||||
"structcheck",
|
||||
"stylecheck",
|
||||
# "testpackage",
|
||||
"thelper",
|
||||
"tparallel",
|
||||
"typecheck",
|
||||
"unconvert",
|
||||
"unparam",
|
||||
"unused",
|
||||
"varcheck",
|
||||
"wastedassign",
|
||||
"whitespace",
|
||||
# "wrapcheck",
|
||||
# "wsl"
|
||||
]
|
||||
127
vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
generated
vendored
Normal file
127
vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
version: 2
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go fmt ./...
|
||||
- go test ./...
|
||||
builds:
|
||||
- id: tomll
|
||||
main: ./cmd/tomll
|
||||
binary: tomll
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_arm
|
||||
- linux_riscv64
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- windows_arm
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
- id: tomljson
|
||||
main: ./cmd/tomljson
|
||||
binary: tomljson
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_arm
|
||||
- linux_riscv64
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- windows_arm
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
- id: jsontoml
|
||||
main: ./cmd/jsontoml
|
||||
binary: jsontoml
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_riscv64
|
||||
- linux_arm
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- windows_arm
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
universal_binaries:
|
||||
- id: tomll
|
||||
replace: true
|
||||
name_template: tomll
|
||||
- id: tomljson
|
||||
replace: true
|
||||
name_template: tomljson
|
||||
- id: jsontoml
|
||||
replace: true
|
||||
name_template: jsontoml
|
||||
archives:
|
||||
- id: jsontoml
|
||||
format: tar.xz
|
||||
builds:
|
||||
- jsontoml
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
- id: tomljson
|
||||
format: tar.xz
|
||||
builds:
|
||||
- tomljson
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
- id: tomll
|
||||
format: tar.xz
|
||||
builds:
|
||||
- tomll
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
dockers:
|
||||
- id: tools
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
ids:
|
||||
- jsontoml
|
||||
- tomljson
|
||||
- tomll
|
||||
image_templates:
|
||||
- "ghcr.io/pelletier/go-toml:latest"
|
||||
- "ghcr.io/pelletier/go-toml:{{ .Tag }}"
|
||||
- "ghcr.io/pelletier/go-toml:v{{ .Major }}"
|
||||
skip_push: false
|
||||
checksum:
|
||||
name_template: 'sha256sums.txt'
|
||||
snapshot:
|
||||
version_template: "{{ incpatch .Version }}-next"
|
||||
release:
|
||||
github:
|
||||
owner: pelletier
|
||||
name: go-toml
|
||||
draft: true
|
||||
prerelease: auto
|
||||
mode: replace
|
||||
changelog:
|
||||
use: github-native
|
||||
announce:
|
||||
skip: true
|
||||
193
vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
generated
vendored
Normal file
193
vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
# Contributing
|
||||
|
||||
Thank you for your interest in go-toml! We appreciate you considering
|
||||
contributing to go-toml!
|
||||
|
||||
The main goal of the project is to provide an easy-to-use and efficient TOML
|
||||
implementation for Go that gets the job done and gets out of your way – dealing
|
||||
with TOML is probably not the central piece of your project.
|
||||
|
||||
As the single maintainer of go-toml, time is scarce. All help, big or small, is
|
||||
more than welcomed!
|
||||
|
||||
## Ask questions
|
||||
|
||||
Any question you may have, somebody else might have it too. Always feel free to
|
||||
ask them on the [discussion board][discussions]. We will try to answer them as
|
||||
clearly and quickly as possible, time permitting.
|
||||
|
||||
Asking questions also helps us identify areas where the documentation needs
|
||||
improvement, or new features that weren't envisioned before. Sometimes, a
|
||||
seemingly innocent question leads to the fix of a bug. Don't hesitate and ask
|
||||
away!
|
||||
|
||||
[discussions]: https://github.com/pelletier/go-toml/discussions
|
||||
|
||||
## Improve the documentation
|
||||
|
||||
The best way to share your knowledge and experience with go-toml is to improve
|
||||
the documentation. Fix a typo, clarify an interface, add an example, anything
|
||||
goes!
|
||||
|
||||
The documentation is present in the [README][readme] and throughout the source
|
||||
code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
|
||||
to the documentation, create a pull request with your proposed changes. For
|
||||
simple changes like that, the easiest way to go is probably the "Fork this
|
||||
project and edit the file" button on Github, displayed at the top right of the
|
||||
file. Unless it's a trivial change (for example a typo), provide a little bit of
|
||||
context in your pull request description or commit message.
|
||||
|
||||
## Report a bug
|
||||
|
||||
Found a bug! Sorry to hear that :(. Help us and others track them down and fix them by
|
||||
reporting it. [File a new bug report][bug-report] on the [issues
|
||||
tracker][issues-tracker]. The template should provide enough guidance on what to
|
||||
include. When in doubt: add more details! By reducing ambiguity and providing
|
||||
more information, it decreases back and forth and saves everyone time.
|
||||
|
||||
## Code changes
|
||||
|
||||
Want to contribute a patch? Very happy to hear that!
|
||||
|
||||
First, some high-level rules:
|
||||
|
||||
- A short proposal with some POC code is better than a lengthy piece of text
|
||||
with no code. Code speaks louder than words. That being said, bigger changes
|
||||
should probably start with a [discussion][discussions].
|
||||
- No backward-incompatible patch will be accepted unless discussed. Sometimes
|
||||
it's hard, but we try not to break people's programs unless we absolutely have
|
||||
to.
|
||||
- If you are writing a new feature or extending an existing one, make sure to
|
||||
write some documentation.
|
||||
- Bug fixes need to be accompanied with regression tests.
|
||||
- New code needs to be tested.
|
||||
- Your commit messages need to explain why the change is needed, even if already
|
||||
included in the PR description.
|
||||
|
||||
It does sound like a lot, but those best practices are here to save time overall
|
||||
and continuously improve the quality of the project, which is something everyone
|
||||
benefits from.
|
||||
|
||||
### Get started
|
||||
|
||||
The fairly standard code contribution process looks like this:
|
||||
|
||||
1. [Fork the project][fork].
|
||||
2. Make your changes, commit on any branch you like.
|
||||
3. [Open up a pull request][pull-request]
|
||||
4. Review, potentially ask for changes.
|
||||
5. Merge.
|
||||
|
||||
Feel free to ask for help! You can create draft pull requests to gather
|
||||
some early feedback!
|
||||
|
||||
### Run the tests
|
||||
|
||||
You can run tests for go-toml using Go's test tool: `go test -race ./...`.
|
||||
|
||||
During the pull request process, all tests will be run on Linux, Windows, and
|
||||
MacOS on the last two versions of Go.
|
||||
|
||||
However, given GitHub's new policy to _not_ run Actions on pull requests until a
|
||||
maintainer clicks a button, it is highly recommended that you run them locally
|
||||
as you make changes.
|
||||
|
||||
### Check coverage
|
||||
|
||||
We use `go tool cover` to compute test coverage. Most code editors have a way to
|
||||
run and display code coverage, but at the end of the day, we do this:
|
||||
|
||||
```
|
||||
go test -covermode=atomic -coverprofile=coverage.out
|
||||
go tool cover -func=coverage.out
|
||||
```
|
||||
|
||||
and verify that the overall percentage of tested code does not go down. This is
|
||||
a requirement. As a rule of thumb, all lines of code touched by your changes
|
||||
should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your
|
||||
code lowers the coverage.
|
||||
|
||||
### Verify performance
|
||||
|
||||
Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
|
||||
builtin benchmark systems. Because of their noisy nature, containers provided by
|
||||
Github Actions cannot be reliably used for benchmarking. As a result, you are
|
||||
responsible for checking that your changes do not incur a performance penalty.
|
||||
You can run the following to execute benchmarks:
|
||||
|
||||
```
|
||||
go test ./... -bench=. -count=10
|
||||
```
|
||||
|
||||
Benchmark results should be compared against each other with
|
||||
[benchstat][benchstat]. Typical flow looks like this:
|
||||
|
||||
1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to
|
||||
a file (for example `old.txt`).
|
||||
2. Make some code changes.
|
||||
3. Run `go test ....` again, and save the output to an other file (for example
|
||||
`new.txt`).
|
||||
4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any
|
||||
test.
|
||||
|
||||
On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts
|
||||
performance.
|
||||
|
||||
It is highly encouraged to add the benchstat results to your pull request
|
||||
description. Pull requests that lower performance will receive more scrutiny.
|
||||
|
||||
[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat
|
||||
|
||||
### Style
|
||||
|
||||
Try to look around and follow the same format and structure as the rest of the
|
||||
code. We enforce using `go fmt` on the whole code base.
|
||||
|
||||
---
|
||||
|
||||
## Maintainers-only
|
||||
|
||||
### Merge pull request
|
||||
|
||||
Checklist:
|
||||
|
||||
- Passing CI.
|
||||
- Does not introduce backward-incompatible changes (unless discussed).
|
||||
- Has relevant doc changes.
|
||||
- Benchstat does not show performance regression.
|
||||
- Pull request is [labeled appropriately][pr-labels].
|
||||
- Title will be understandable in the changelog.
|
||||
|
||||
1. Merge using "squash and merge".
|
||||
2. Make sure to edit the commit message to keep all the useful information
|
||||
nice and clean.
|
||||
3. Make sure the commit title is clear and contains the PR number (#123).
|
||||
|
||||
### New release
|
||||
|
||||
1. Decide on the next version number. Use semver. Review commits since last
|
||||
version to assess.
|
||||
2. Tag release. For example:
|
||||
```
|
||||
git checkout v2
|
||||
git pull
|
||||
git tag v2.2.0
|
||||
git push --tags
|
||||
```
|
||||
3. CI automatically builds a draft Github release. Review it and edit as
|
||||
necessary. Look for "Other changes". That would indicate a pull request not
|
||||
labeled properly. Tweak labels and pull request titles until changelog looks
|
||||
good for users.
|
||||
4. Check "create discussion" box, in the "Releases" category.
|
||||
5. If new version is an alpha or beta only, check pre-release box.
|
||||
|
||||
|
||||
[issues-tracker]: https://github.com/pelletier/go-toml/issues
|
||||
[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
|
||||
[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
|
||||
[readme]: ./README.md
|
||||
[fork]: https://help.github.com/articles/fork-a-repo
|
||||
[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
|
||||
[new-release]: https://github.com/pelletier/go-toml/releases/new
|
||||
[gh]: https://github.com/cli/cli
|
||||
[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
|
||||
5
vendor/github.com/pelletier/go-toml/v2/Dockerfile
generated
vendored
Normal file
5
vendor/github.com/pelletier/go-toml/v2/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
FROM scratch
|
||||
ENV PATH "$PATH:/bin"
|
||||
COPY tomll /bin/tomll
|
||||
COPY tomljson /bin/tomljson
|
||||
COPY jsontoml /bin/jsontoml
|
||||
22
vendor/github.com/pelletier/go-toml/v2/LICENSE
generated
vendored
Normal file
22
vendor/github.com/pelletier/go-toml/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
go-toml v2
|
||||
Copyright (c) 2021 - 2023 Thomas Pelletier
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
576
vendor/github.com/pelletier/go-toml/v2/README.md
generated
vendored
Normal file
576
vendor/github.com/pelletier/go-toml/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,576 @@
|
||||
# go-toml v2
|
||||
|
||||
Go library for the [TOML](https://toml.io/en/) format.
|
||||
|
||||
This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
|
||||
|
||||
[💬 Anything else](https://github.com/pelletier/go-toml/discussions)
|
||||
|
||||
## Documentation
|
||||
|
||||
Full API, examples, and implementation notes are available in the Go
|
||||
documentation.
|
||||
|
||||
[](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
|
||||
|
||||
## Import
|
||||
|
||||
```go
|
||||
import "github.com/pelletier/go-toml/v2"
|
||||
```
|
||||
|
||||
See [Modules](#Modules).
|
||||
|
||||
## Features
|
||||
|
||||
### Stdlib behavior
|
||||
|
||||
As much as possible, this library is designed to behave similarly to the
|
||||
standard library's `encoding/json`.
|
||||
|
||||
### Performance
|
||||
|
||||
While go-toml favors usability, it is written with performance in mind. Most
|
||||
operations should not be shockingly slow. See [benchmarks](#benchmarks).
|
||||
|
||||
### Strict mode
|
||||
|
||||
`Decoder` can be set to "strict mode", which makes it error when some parts of
|
||||
the TOML document were not present in the target structure. This is a great way
|
||||
to check for typos. [See example in the documentation][strict].
|
||||
|
||||
[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
|
||||
|
||||
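As an illustrative sketch (not part of the vendored README), strict mode is enabled on a `Decoder`; the config struct, the typo'd key, and the use of `StrictMissingError` below are assumptions made for the example, and the usual `errors`, `fmt`, and `strings` imports are omitted:

```go
d := toml.NewDecoder(strings.NewReader(`
host = "127.0.0.1"
prot = 4242  # typo: the struct only knows "port"
`))
d.DisallowUnknownFields()

var cfg struct {
	Host string
	Port int
}
if err := d.Decode(&cfg); err != nil {
	var details *toml.StrictMissingError
	if errors.As(err, &details) {
		fmt.Println(details.String()) // annotated report pointing at "prot"
	}
}
```
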
### Contextualized errors
|
||||
|
||||
When most decoding errors occur, go-toml returns [`DecodeError`][decode-err],
|
||||
which contains a human readable contextualized version of the error. For
|
||||
example:
|
||||
|
||||
```
|
||||
1| [server]
|
||||
2| path = 100
|
||||
| ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string
|
||||
3| port = 50
|
||||
```
|
||||
|
||||
[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
|
||||
|
||||
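A short sketch of inspecting such an error (the struct and input are made up for illustration; `errors` and `fmt` imports are omitted):

```go
var cfg struct {
	Port int
}
err := toml.Unmarshal([]byte(`port = "not a number"`), &cfg)

var derr *toml.DecodeError
if errors.As(err, &derr) {
	row, col := derr.Position()
	fmt.Println("decode error at row", row, "column", col)
	fmt.Println(derr.String()) // prints an annotated snippet like the one above
}
```
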
### Local date and time support
|
||||
|
||||
TOML supports native [local date/times][ldt]. It allows representing a given
|
||||
date, time, or date-time without relation to a timezone or offset. To support
|
||||
this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
|
||||
[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`,
|
||||
making them convenient yet unambiguous structures for their respective TOML
|
||||
representation.
|
||||
|
||||
[ldt]: https://toml.io/en/v1.0.0#local-date-time
|
||||
[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
|
||||
[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
|
||||
[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
|
||||
|
||||
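For illustration only (error handling elided, field names invented), a local date can be decoded into one of these types and later converted to `time.Time`:

```go
var cfg struct {
	Start toml.LocalDate
}
_ = toml.Unmarshal([]byte(`start = 2024-01-15`), &cfg)

fmt.Println(cfg.Start)                  // 2024-01-15
fmt.Println(cfg.Start.AsTime(time.UTC)) // 2024-01-15 00:00:00 +0000 UTC
```
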
### Commented config
|
||||
|
||||
Since TOML is often used for configuration files, go-toml can emit documents
|
||||
annotated with [comments and commented-out values][comments-example]. For
|
||||
example, it can generate the following file:
|
||||
|
||||
```toml
|
||||
# Host IP to connect to.
|
||||
host = '127.0.0.1'
|
||||
# Port of the remote server.
|
||||
port = 4242
|
||||
|
||||
# Encryption parameters (optional)
|
||||
# [TLS]
|
||||
# cipher = 'AEAD-AES128-GCM-SHA256'
|
||||
# version = 'TLS 1.3'
|
||||
```
|
||||
|
||||
[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented
|
||||
|
||||
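A hedged sketch of how the first half of that file could be produced with the `comment` struct tag (the struct and values are invented; commented-out sections additionally use the `commented` tag option shown in the linked example):

```go
type Config struct {
	Host string `toml:"host" comment:"Host IP to connect to."`
	Port int    `toml:"port" comment:"Port of the remote server."`
}

b, _ := toml.Marshal(Config{Host: "127.0.0.1", Port: 4242})
fmt.Println(string(b))
// # Host IP to connect to.
// host = '127.0.0.1'
// # Port of the remote server.
// port = 4242
```
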
## Getting started
|
||||
|
||||
Given the following struct, let's see how to read it and write it as TOML:
|
||||
|
||||
```go
|
||||
type MyConfig struct {
|
||||
Version int
|
||||
Name string
|
||||
Tags []string
|
||||
}
|
||||
```
|
||||
|
||||
### Unmarshaling
|
||||
|
||||
[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
|
||||
content. For example:
|
||||
|
||||
```go
|
||||
doc := `
|
||||
version = 2
|
||||
name = "go-toml"
|
||||
tags = ["go", "toml"]
|
||||
`
|
||||
|
||||
var cfg MyConfig
|
||||
err := toml.Unmarshal([]byte(doc), &cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println("version:", cfg.Version)
|
||||
fmt.Println("name:", cfg.Name)
|
||||
fmt.Println("tags:", cfg.Tags)
|
||||
|
||||
// Output:
|
||||
// version: 2
|
||||
// name: go-toml
|
||||
// tags: [go toml]
|
||||
```
|
||||
|
||||
[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
|
||||
|
||||
### Marshaling
|
||||
|
||||
[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
|
||||
as a TOML document:
|
||||
|
||||
```go
|
||||
cfg := MyConfig{
|
||||
Version: 2,
|
||||
Name: "go-toml",
|
||||
Tags: []string{"go", "toml"},
|
||||
}
|
||||
|
||||
b, err := toml.Marshal(cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
|
||||
// Output:
|
||||
// Version = 2
|
||||
// Name = 'go-toml'
|
||||
// Tags = ['go', 'toml']
|
||||
```
|
||||
|
||||
[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
|
||||
|
||||
## Unstable API
|
||||
|
||||
This API does not yet follow the backward compatibility guarantees of this
|
||||
library. It provides early access to features that may have rough edges or an
|
||||
API subject to change.
|
||||
|
||||
### Parser
|
||||
|
||||
Parser is the unstable API that allows iterative parsing of a TOML document at
|
||||
the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Execution time speedup compared to other Go TOML libraries:
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
|
||||
<tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
|
||||
<tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
|
||||
<tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
|
||||
<tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
|
||||
<tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<details><summary>See more</summary>
|
||||
<p>The table above has the results of the most common use-cases. The table below
|
||||
contains the results of all benchmarks, including unrealistic ones. It is
|
||||
provided for completeness.</p>
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
|
||||
<tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
|
||||
<tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
|
||||
<tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
|
||||
<tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
|
||||
<tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
|
||||
<tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
|
||||
<tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
|
||||
<tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
|
||||
<tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
|
||||
<tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
|
||||
</details>
|
||||
|
||||
## Modules
|
||||
|
||||
go-toml uses Go's standard modules system.
|
||||
|
||||
Installation instructions:
|
||||
|
||||
- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
|
||||
with it automatically.
|
||||
- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
|
||||
|
||||
In case of trouble: [Go Modules FAQ][mod-faq].
|
||||
|
||||
[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
|
||||
|
||||
## Tools
|
||||
|
||||
Go-toml provides three handy command line tools:
|
||||
|
||||
* `tomljson`: Reads a TOML file and outputs its JSON representation.
|
||||
|
||||
```
|
||||
$ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
|
||||
$ tomljson --help
|
||||
```
|
||||
|
||||
* `jsontoml`: Reads a JSON file and outputs a TOML representation.
|
||||
|
||||
```
|
||||
$ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
|
||||
$ jsontoml --help
|
||||
```
|
||||
|
||||
* `tomll`: Lints and reformats a TOML file.
|
||||
|
||||
```
|
||||
$ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
|
||||
$ tomll --help
|
||||
```
|
||||
|
||||
### Docker image
|
||||
|
||||
Those tools are also available as a [Docker image][docker]. For example, to use
|
||||
`tomljson`:
|
||||
|
||||
```
|
||||
docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
|
||||
```
|
||||
|
||||
Multiple versions are available on [ghcr.io][docker].
|
||||
|
||||
[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
|
||||
|
||||
## Migrating from v1
|
||||
|
||||
This section describes the differences between v1 and v2, with some pointers on
|
||||
how to get the original behavior when possible.
|
||||
|
||||
### Decoding / Unmarshal
|
||||
|
||||
#### Automatic field name guessing
|
||||
|
||||
When unmarshaling to a struct, if a key in the TOML document does not exactly
|
||||
match the name of a struct field or any of the `toml`-tagged field, v1 tries
|
||||
multiple variations of the key ([code][v1-keys]).
|
||||
|
||||
V2 instead does a case-insensitive matching, like `encoding/json`.
|
||||
|
||||
This could impact you if you are relying on casing to differentiate two fields,
|
||||
and one of them is not using the `toml` struct tag. The recommended solution
|
||||
is to be specific about tag names for those fields using the `toml` struct tag.
|
||||
|
||||
[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
|
||||
|
||||
#### Ignore preexisting value in interface
|
||||
|
||||
When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
|
||||
element in the interface to decode the object. For example:
|
||||
|
||||
```go
|
||||
type inner struct {
|
||||
B interface{}
|
||||
}
|
||||
type doc struct {
|
||||
A interface{}
|
||||
}
|
||||
|
||||
d := doc{
|
||||
A: inner{
|
||||
B: "Before",
|
||||
},
|
||||
}
|
||||
|
||||
data := `
|
||||
[A]
|
||||
B = "After"
|
||||
`
|
||||
|
||||
toml.Unmarshal([]byte(data), &d)
|
||||
fmt.Printf("toml v1: %#v\n", d)
|
||||
|
||||
// toml v1: main.doc{A:main.inner{B:"After"}}
|
||||
```
|
||||
|
||||
In this case, field `A` is of type `interface{}`, containing a `inner` struct.
|
||||
V1 sees that type and uses it when decoding the object.
|
||||
|
||||
When decoding an object into an `interface{}`, V2 instead disregards whatever
|
||||
value the `interface{}` may contain and replaces it with a
|
||||
`map[string]interface{}`. With the same data structure as above, here is what
|
||||
the result looks like:
|
||||
|
||||
```go
|
||||
toml.Unmarshal([]byte(data), &d)
|
||||
fmt.Printf("toml v2: %#v\n", d)
|
||||
|
||||
// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
|
||||
```
|
||||
|
||||
This is to match `encoding/json`'s behavior. There is no way to make the v2
|
||||
decoder behave like v1.
|
||||
|
||||
#### Values out of array bounds ignored
|
||||
|
||||
When decoding into an array, v1 returns an error when the number of elements
|
||||
contained in the doc is superior to the capacity of the array. For example:
|
||||
|
||||
```go
|
||||
type doc struct {
|
||||
A [2]string
|
||||
}
|
||||
d := doc{}
|
||||
err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
|
||||
fmt.Println(err)
|
||||
|
||||
// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
|
||||
```
|
||||
|
||||
In the same situation, v2 ignores the last value:
|
||||
|
||||
```go
|
||||
err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
|
||||
fmt.Println("err:", err, "d:", d)
|
||||
// err: <nil> d: {[one two]}
|
||||
```
|
||||
|
||||
This is to match `encoding/json`'s behavior. There is no way to make the v2
|
||||
decoder behave like v1.
|
||||
|
||||
#### Support for `toml.Unmarshaler` has been dropped
|
||||
|
||||
This method was not widely used, poorly defined, and added a lot of complexity.
|
||||
A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
|
||||
interface and using strings, as sketched below.
|
||||
|
||||
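A minimal sketch of that replacement pattern; `LogLevel` and its values are invented for the example, and the `fmt` import is assumed:

```go
type LogLevel int

func (l *LogLevel) UnmarshalText(text []byte) error {
	switch string(text) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	case "warn":
		*l = 2
	default:
		return fmt.Errorf("unknown log level %q", text)
	}
	return nil
}

var cfg struct {
	Level LogLevel `toml:"level"`
}
err := toml.Unmarshal([]byte(`level = "warn"`), &cfg)
// err == nil, cfg.Level == 2
```
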
#### Support for `default` struct tag has been dropped
|
||||
|
||||
This feature adds complexity and a poorly defined API for an effect that can be
|
||||
accomplished outside of the library.
|
||||
|
||||
It does not seem like other format parsers in Go support that feature (the
|
||||
project referenced in the original ticket #202 has not been updated since 2017).
|
||||
Given that go-toml v2 should not touch values not in the document, the same
|
||||
effect can be achieved by pre-filling the struct with defaults (libraries like
|
||||
[go-defaults][go-defaults] can help). Also, string representation is not well
|
||||
defined for all types: it creates issues like #278.
|
||||
|
||||
The recommended replacement is pre-filling the struct before unmarshaling.
|
||||
|
||||
[go-defaults]: https://github.com/mcuadros/go-defaults
|
||||
|
||||
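For example, a sketch of the pre-filling approach (names are invented); keys absent from the document keep whatever value the struct already holds:

```go
type ServerConfig struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

// Start from the defaults, then let the document override what it defines.
cfg := ServerConfig{Host: "0.0.0.0", Port: 8080}
err := toml.Unmarshal([]byte(`host = "192.168.1.10"`), &cfg)
// err == nil, cfg.Host == "192.168.1.10", cfg.Port == 8080 (default kept)
```
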
#### `toml.Tree` replacement
|
||||
|
||||
This structure was the initial attempt at providing a document model for
|
||||
go-toml. It allows manipulating the structure of any document, encoding and
|
||||
decoding from their TOML representation. While a more robust feature was
|
||||
initially planned in go-toml v2, this has been ultimately [removed from
|
||||
scope][nodoc] of this library, with no plan to add it back at the moment. The
|
||||
closest equivalent at the moment would be to unmarshal into an `interface{}` and
|
||||
use type assertions and/or reflection to manipulate the arbitrary
|
||||
structure. However this would fall short of providing all of the TOML features
|
||||
such as adding comments and being specific about whitespace.
|
||||
|
||||
|
||||
#### `toml.Position` are not retrievable anymore
|
||||
|
||||
The API for retrieving the position (line, column) of a specific TOML element does
|
||||
not exist anymore. This was done to minimize the amount of concepts introduced
|
||||
by the library (query path), and avoid the performance hit related to storing
|
||||
positions in the absence of a document model, for a feature that seemed to have
|
||||
little use. Errors however have gained more detailed position
|
||||
information. Position retrieval seems better fitted for a document model, which
|
||||
has been [removed from the scope][nodoc] of go-toml v2 at the moment.
|
||||
|
||||
### Encoding / Marshal
|
||||
|
||||
#### Default struct fields order
|
||||
|
||||
V1 emits struct fields ordered alphabetically by default. V2 struct fields are
|
||||
emitted in the order they are defined. For example:
|
||||
|
||||
```go
|
||||
type S struct {
|
||||
B string
|
||||
A string
|
||||
}
|
||||
|
||||
data := S{
|
||||
B: "B",
|
||||
A: "A",
|
||||
}
|
||||
|
||||
b, _ := tomlv1.Marshal(data)
|
||||
fmt.Println("v1:\n" + string(b))
|
||||
|
||||
b, _ = tomlv2.Marshal(data)
|
||||
fmt.Println("v2:\n" + string(b))
|
||||
|
||||
// Output:
|
||||
// v1:
|
||||
// A = "A"
|
||||
// B = "B"
|
||||
|
||||
// v2:
|
||||
// B = 'B'
|
||||
// A = 'A'
|
||||
```
|
||||
|
||||
There is no way to make v2 encoder behave like v1. A workaround could be to
|
||||
manually sort the fields alphabetically in the struct definition, or generate
|
||||
struct types using `reflect.StructOf`.
|
||||
|
||||
#### No indentation by default
|
||||
|
||||
V1 automatically indents content of tables by default. V2 does not. However the
|
||||
same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. For example:
|
||||
|
||||
```go
|
||||
data := map[string]interface{}{
|
||||
"table": map[string]string{
|
||||
"key": "value",
|
||||
},
|
||||
}
|
||||
|
||||
b, _ := tomlv1.Marshal(data)
|
||||
fmt.Println("v1:\n" + string(b))
|
||||
|
||||
b, _ = tomlv2.Marshal(data)
|
||||
fmt.Println("v2:\n" + string(b))
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
enc := tomlv2.NewEncoder(&buf)
|
||||
enc.SetIndentTables(true)
|
||||
enc.Encode(data)
|
||||
fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
|
||||
|
||||
// Output:
|
||||
// v1:
|
||||
//
|
||||
// [table]
|
||||
// key = "value"
|
||||
//
|
||||
// v2:
|
||||
// [table]
|
||||
// key = 'value'
|
||||
//
|
||||
//
|
||||
// v2 Encoder:
|
||||
// [table]
|
||||
// key = 'value'
|
||||
```
|
||||
|
||||
[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
|
||||
|
||||
#### Keys and strings are single quoted
|
||||
|
||||
V1 always uses double quotes (`"`) around strings and keys that cannot be
|
||||
represented bare (unquoted). V2 uses single quotes instead by default (`'`),
|
||||
unless a character cannot be represented, then falls back to double quotes. As a
|
||||
result of this change, `Encoder.QuoteMapKeys` has been removed, as it is not
|
||||
useful anymore.
|
||||
|
||||
There is no way to make v2 encoder behave like v1.
|
||||
|
||||
#### `TextMarshaler` emits as a string, not TOML
|
||||
|
||||
Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
|
||||
v1. The encoder would append the result to the output directly. In v2 the result
|
||||
is wrapped in a string. As a result, this interface cannot be implemented by the
|
||||
root object.
|
||||
|
||||
There is no way to make v2 encoder behave like v1.
|
||||
|
||||
[tm]: https://golang.org/pkg/encoding/#TextMarshaler
|
||||
|
||||
#### `Encoder.CompactComments` has been removed
|
||||
|
||||
Emitting compact comments is now the default behavior of go-toml. This option
|
||||
is not necessary anymore.
|
||||
|
||||
#### Struct tags have been merged
|
||||
|
||||
V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
|
||||
`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
|
||||
`toml`, `multiline`, `commented`, and `omitempty`. For example:
|
||||
|
||||
```go
|
||||
type doc struct {
|
||||
// v1
|
||||
F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
|
||||
// v2
|
||||
F string `toml:"field,multiline,omitempty,commented"`
|
||||
}
|
||||
```
|
||||
|
||||
As a result, the `Encoder.SetTag*` methods have been removed, as there is just
|
||||
one tag now.
|
||||
|
||||
#### `Encoder.ArraysWithOneElementPerLine` has been renamed
|
||||
|
||||
The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
|
||||
|
||||
#### `Encoder.Indentation` has been renamed
|
||||
|
||||
The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
|
||||
|
||||
|
||||
#### Embedded structs behave like stdlib
|
||||
|
||||
V1 defaults to merging embedded struct fields into the embedding struct. This
|
||||
behavior was unexpected because it does not follow the standard library. To
|
||||
avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
|
||||
added to make the encoder behave correctly. Given backward compatibility is not
|
||||
a problem anymore, v2 does the right thing by default: it follows the behavior
|
||||
of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
|
||||
|
||||
[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
|
||||
|
||||
### `query`
|
||||
|
||||
go-toml v1 provided the [`go-toml/query`][query] package. It allowed to run
|
||||
JSONPath-style queries on TOML files. This feature is not available in v2. For a
|
||||
replacement, check out [dasel][dasel].
|
||||
|
||||
This package has been removed because it was essentially not supported anymore
|
||||
(last commit May 2020), increased the complexity of the code base, and more
|
||||
complete solutions exist out there.
|
||||
|
||||
[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
|
||||
[dasel]: https://github.com/TomWright/dasel
|
||||
|
||||
## Versioning
|
||||
|
||||
Except for parts explicitly marked otherwise, go-toml follows [Semantic
|
||||
Versioning](https://semver.org). The supported version of
|
||||
[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this
|
||||
document. The last two major versions of Go are supported (see [Go Release
|
||||
Policy](https://golang.org/doc/devel/release.html#policy)).
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT). Read [LICENSE](LICENSE).
|
||||
16
vendor/github.com/pelletier/go-toml/v2/SECURITY.md
generated
vendored
Normal file
16
vendor/github.com/pelletier/go-toml/v2/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,16 @@
# Security Policy

## Supported Versions

| Version    | Supported          |
| ---------- | ------------------ |
| Latest 2.x | :white_check_mark: |
| All 1.x    | :x:                |
| All 0.x    | :x:                |

## Reporting a Vulnerability

Email a vulnerability report to `security@pelletier.codes`. Make sure to include
as many details as possible to reproduce the vulnerability. This is a
side-project: I will try to get back to you as quickly as possible, time
permitting in my personal life. Providing a working patch helps very much!
284
vendor/github.com/pelletier/go-toml/v2/ci.sh
generated
vendored
Normal file
@@ -0,0 +1,284 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
|
||||
stderr() {
|
||||
echo "$@" 1>&2
|
||||
}
|
||||
|
||||
usage() {
|
||||
b=$(basename "$0")
|
||||
echo $b: ERROR: "$@" 1>&2
|
||||
|
||||
cat 1>&2 <<EOF
|
||||
|
||||
DESCRIPTION
|
||||
|
||||
$(basename "$0") is the script to run continuous integration commands for
|
||||
go-toml on unix.
|
||||
|
||||
Requires Go and Git to be available in the PATH. Expects to be ran from the
|
||||
root of go-toml's Git repository.
|
||||
|
||||
USAGE
|
||||
|
||||
$b COMMAND [OPTIONS...]
|
||||
|
||||
COMMANDS
|
||||
|
||||
benchmark [OPTIONS...] [BRANCH]
|
||||
|
||||
Run benchmarks.
|
||||
|
||||
ARGUMENTS
|
||||
|
||||
BRANCH Optional. Defines which Git branch to use when running
|
||||
benchmarks.
|
||||
|
||||
OPTIONS
|
||||
|
||||
-d Compare benchmarks of HEAD with BRANCH using benchstats. In
|
||||
this form the BRANCH argument is required.
|
||||
|
||||
-a Compare benchmarks of HEAD against go-toml v1 and
|
||||
BurntSushi/toml.
|
||||
|
||||
-html When used with -a, emits the output as HTML, ready to be
|
||||
embedded in the README.
|
||||
|
||||
coverage [OPTIONS...] [BRANCH]
|
||||
|
||||
Generates code coverage.
|
||||
|
||||
ARGUMENTS
|
||||
|
||||
BRANCH Optional. Defines which Git branch to use when reporting
|
||||
coverage. Defaults to HEAD.
|
||||
|
||||
OPTIONS
|
||||
|
||||
-d Compare coverage of HEAD with the one of BRANCH. In this form,
|
||||
the BRANCH argument is required. Exit code is non-zero when
|
||||
coverage percentage decreased.
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
cover() {
|
||||
branch="${1}"
|
||||
dir="$(mktemp -d)"
|
||||
|
||||
stderr "Executing coverage for ${branch} at ${dir}"
|
||||
|
||||
if [ "${branch}" = "HEAD" ]; then
|
||||
cp -r . "${dir}/"
|
||||
else
|
||||
git worktree add "$dir" "$branch"
|
||||
fi
|
||||
|
||||
pushd "$dir"
|
||||
go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./...
|
||||
grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out
|
||||
go tool cover -func=coverage.out
|
||||
echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
|
||||
popd
|
||||
|
||||
if [ "${branch}" != "HEAD" ]; then
|
||||
git worktree remove --force "$dir"
|
||||
fi
|
||||
}
|
||||
|
||||
coverage() {
|
||||
case "$1" in
|
||||
-d)
|
||||
shift
|
||||
target="${1?Need to provide a target branch argument}"
|
||||
|
||||
output_dir="$(mktemp -d)"
|
||||
target_out="${output_dir}/target.txt"
|
||||
head_out="${output_dir}/head.txt"
|
||||
|
||||
cover "${target}" > "${target_out}"
|
||||
cover "HEAD" > "${head_out}"
|
||||
|
||||
cat "${target_out}"
|
||||
cat "${head_out}"
|
||||
|
||||
echo ""
|
||||
|
||||
target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
|
||||
head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
|
||||
echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
|
||||
|
||||
delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
|
||||
echo "Delta: ${delta_pct}"
|
||||
|
||||
if [[ $delta_pct = \-* ]]; then
|
||||
echo "Regression!";
|
||||
|
||||
target_diff="${output_dir}/target.diff.txt"
|
||||
head_diff="${output_dir}/head.diff.txt"
|
||||
cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
|
||||
cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
|
||||
|
||||
diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
cover "${1-HEAD}"
|
||||
}
|
||||
|
||||
bench() {
|
||||
branch="${1}"
|
||||
out="${2}"
|
||||
replace="${3}"
|
||||
dir="$(mktemp -d)"
|
||||
|
||||
stderr "Executing benchmark for ${branch} at ${dir}"
|
||||
|
||||
if [ "${branch}" = "HEAD" ]; then
|
||||
cp -r . "${dir}/"
|
||||
else
|
||||
git worktree add "$dir" "$branch"
|
||||
fi
|
||||
|
||||
pushd "$dir"
|
||||
|
||||
if [ "${replace}" != "" ]; then
|
||||
find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \;
|
||||
go get "${replace}"
|
||||
fi
|
||||
|
||||
export GOMAXPROCS=2
|
||||
go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}"
|
||||
popd
|
||||
|
||||
if [ "${branch}" != "HEAD" ]; then
|
||||
git worktree remove --force "$dir"
|
||||
fi
|
||||
}
|
||||
|
||||
fmktemp() {
|
||||
if mktemp --version &> /dev/null; then
|
||||
# GNU
|
||||
mktemp --suffix=-$1
|
||||
else
|
||||
# BSD
|
||||
mktemp -t $1
|
||||
fi
|
||||
}
|
||||
|
||||
benchstathtml() {
|
||||
python3 - $1 <<'EOF'
|
||||
import sys
|
||||
|
||||
lines = []
|
||||
stop = False
|
||||
|
||||
with open(sys.argv[1]) as f:
|
||||
for line in f.readlines():
|
||||
line = line.strip()
|
||||
if line == "":
|
||||
stop = True
|
||||
if not stop:
|
||||
lines.append(line.split(','))
|
||||
|
||||
results = []
|
||||
for line in reversed(lines[2:]):
|
||||
if len(line) < 8 or line[0] == "":
|
||||
continue
|
||||
v2 = float(line[1])
|
||||
results.append([
|
||||
line[0].replace("-32", ""),
|
||||
"%.1fx" % (float(line[3])/v2), # v1
|
||||
"%.1fx" % (float(line[7])/v2), # bs
|
||||
])
|
||||
# move geomean to the end
|
||||
results.append(results[0])
|
||||
del results[0]
|
||||
|
||||
|
||||
def printtable(data):
|
||||
print("""
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
|
||||
</thead>
|
||||
<tbody>""")
|
||||
|
||||
for r in data:
|
||||
print(" <tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*r))
|
||||
|
||||
print(""" </tbody>
|
||||
</table>""")
|
||||
|
||||
|
||||
def match(x):
|
||||
return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0]
|
||||
|
||||
above = [x for x in results if match(x)]
|
||||
below = [x for x in results if not match(x)]
|
||||
|
||||
printtable(above)
|
||||
print("<details><summary>See more</summary>")
|
||||
print("""<p>The table above has the results of the most common use-cases. The table below
|
||||
contains the results of all benchmarks, including unrealistic ones. It is
|
||||
provided for completeness.</p>""")
|
||||
printtable(below)
|
||||
print('<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>')
|
||||
print("</details>")
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
benchmark() {
|
||||
case "$1" in
|
||||
-d)
|
||||
shift
|
||||
target="${1?Need to provide a target branch argument}"
|
||||
|
||||
old=`fmktemp ${target}`
|
||||
bench "${target}" "${old}"
|
||||
|
||||
new=`fmktemp HEAD`
|
||||
bench HEAD "${new}"
|
||||
|
||||
benchstat "${old}" "${new}"
|
||||
return 0
|
||||
;;
|
||||
-a)
|
||||
shift
|
||||
|
||||
v2stats=`fmktemp go-toml-v2`
|
||||
bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2"
|
||||
v1stats=`fmktemp go-toml-v1`
|
||||
bench HEAD "${v1stats}" "github.com/pelletier/go-toml"
|
||||
bsstats=`fmktemp bs-toml`
|
||||
bench HEAD "${bsstats}" "github.com/BurntSushi/toml"
|
||||
|
||||
cp "${v2stats}" go-toml-v2.txt
|
||||
cp "${v1stats}" go-toml-v1.txt
|
||||
cp "${bsstats}" bs-toml.txt
|
||||
|
||||
if [ "$1" = "-html" ]; then
|
||||
tmpcsv=`fmktemp csv`
|
||||
benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
|
||||
benchstathtml $tmpcsv
|
||||
else
|
||||
benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt
|
||||
fi
|
||||
|
||||
rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt
|
||||
return $?
|
||||
esac
|
||||
|
||||
bench "${1-HEAD}" `mktemp`
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
coverage) shift; coverage $@;;
|
||||
benchmark) shift; benchmark $@;;
|
||||
*) usage "bad argument $1";;
|
||||
esac
|
||||
550
vendor/github.com/pelletier/go-toml/v2/decode.go
generated
vendored
Normal file
@@ -0,0 +1,550 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
func parseInteger(b []byte) (int64, error) {
|
||||
if len(b) > 2 && b[0] == '0' {
|
||||
switch b[1] {
|
||||
case 'x':
|
||||
return parseIntHex(b)
|
||||
case 'b':
|
||||
return parseIntBin(b)
|
||||
case 'o':
|
||||
return parseIntOct(b)
|
||||
default:
|
||||
panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
|
||||
}
|
||||
}
|
||||
|
||||
return parseIntDec(b)
|
||||
}
|
||||
|
||||
func parseLocalDate(b []byte) (LocalDate, error) {
|
||||
// full-date = date-fullyear "-" date-month "-" date-mday
|
||||
// date-fullyear = 4DIGIT
|
||||
// date-month = 2DIGIT ; 01-12
|
||||
// date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
||||
var date LocalDate
|
||||
|
||||
if len(b) != 10 || b[4] != '-' || b[7] != '-' {
|
||||
return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
date.Year, err = parseDecimalDigits(b[0:4])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
date.Month, err = parseDecimalDigits(b[5:7])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
date.Day, err = parseDecimalDigits(b[8:10])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
if !isValidDate(date.Year, date.Month, date.Day) {
|
||||
return LocalDate{}, unstable.NewParserError(b, "impossible date")
|
||||
}
|
||||
|
||||
return date, nil
|
||||
}
|
||||
|
||||
func parseDecimalDigits(b []byte) (int, error) {
|
||||
v := 0
|
||||
|
||||
for i, c := range b {
|
||||
if c < '0' || c > '9' {
|
||||
return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)")
|
||||
}
|
||||
v *= 10
|
||||
v += int(c - '0')
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func parseDateTime(b []byte) (time.Time, error) {
|
||||
// offset-date-time = full-date time-delim full-time
|
||||
// full-time = partial-time time-offset
|
||||
// time-offset = "Z" / time-numoffset
|
||||
// time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
|
||||
|
||||
dt, b, err := parseLocalDateTime(b)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
var zone *time.Location
|
||||
|
||||
if len(b) == 0 {
|
||||
// parser should have checked that when assigning the date time node
|
||||
panic("date time should have a timezone")
|
||||
}
|
||||
|
||||
if b[0] == 'Z' || b[0] == 'z' {
|
||||
b = b[1:]
|
||||
zone = time.UTC
|
||||
} else {
|
||||
const dateTimeByteLen = 6
|
||||
if len(b) != dateTimeByteLen {
|
||||
return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
|
||||
}
|
||||
var direction int
|
||||
switch b[0] {
|
||||
case '-':
|
||||
direction = -1
|
||||
case '+':
|
||||
direction = +1
|
||||
default:
|
||||
return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
|
||||
}
|
||||
|
||||
if b[3] != ':' {
|
||||
return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
|
||||
}
|
||||
|
||||
hours, err := parseDecimalDigits(b[1:3])
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
if hours > 23 {
|
||||
return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
|
||||
}
|
||||
|
||||
minutes, err := parseDecimalDigits(b[4:6])
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
if minutes > 59 {
|
||||
return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
|
||||
}
|
||||
|
||||
seconds := direction * (hours*3600 + minutes*60)
|
||||
if seconds == 0 {
|
||||
zone = time.UTC
|
||||
} else {
|
||||
zone = time.FixedZone("", seconds)
|
||||
}
|
||||
b = b[dateTimeByteLen:]
|
||||
}
|
||||
|
||||
if len(b) > 0 {
|
||||
return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
|
||||
}
|
||||
|
||||
t := time.Date(
|
||||
dt.Year,
|
||||
time.Month(dt.Month),
|
||||
dt.Day,
|
||||
dt.Hour,
|
||||
dt.Minute,
|
||||
dt.Second,
|
||||
dt.Nanosecond,
|
||||
zone)
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
|
||||
var dt LocalDateTime
|
||||
|
||||
const localDateTimeByteMinLen = 11
|
||||
if len(b) < localDateTimeByteMinLen {
|
||||
return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
|
||||
}
|
||||
|
||||
date, err := parseLocalDate(b[:10])
|
||||
if err != nil {
|
||||
return dt, nil, err
|
||||
}
|
||||
dt.LocalDate = date
|
||||
|
||||
sep := b[10]
|
||||
if sep != 'T' && sep != ' ' && sep != 't' {
|
||||
return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
|
||||
}
|
||||
|
||||
t, rest, err := parseLocalTime(b[11:])
|
||||
if err != nil {
|
||||
return dt, nil, err
|
||||
}
|
||||
dt.LocalTime = t
|
||||
|
||||
return dt, rest, nil
|
||||
}
|
||||
|
||||
// parseLocalTime is a bit different because it also returns the remaining
|
||||
// []byte that it didn't need. This is to allow parseDateTime to parse those
|
||||
// remaining bytes as a timezone.
|
||||
func parseLocalTime(b []byte) (LocalTime, []byte, error) {
|
||||
var (
|
||||
nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
|
||||
t LocalTime
|
||||
)
|
||||
|
||||
// check if b matches to have expected format HH:MM:SS[.NNNNNN]
|
||||
const localTimeByteLen = 8
|
||||
if len(b) < localTimeByteLen {
|
||||
return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
t.Hour, err = parseDecimalDigits(b[0:2])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
|
||||
if t.Hour > 23 {
|
||||
return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23")
|
||||
}
|
||||
if b[2] != ':' {
|
||||
return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
|
||||
}
|
||||
|
||||
t.Minute, err = parseDecimalDigits(b[3:5])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
if t.Minute > 59 {
|
||||
return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59")
|
||||
}
|
||||
if b[5] != ':' {
|
||||
return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds")
|
||||
}
|
||||
|
||||
t.Second, err = parseDecimalDigits(b[6:8])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
|
||||
if t.Second > 60 {
|
||||
return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60")
|
||||
}
|
||||
|
||||
b = b[8:]
|
||||
|
||||
if len(b) >= 1 && b[0] == '.' {
|
||||
frac := 0
|
||||
precision := 0
|
||||
digits := 0
|
||||
|
||||
for i, c := range b[1:] {
|
||||
if !isDigit(c) {
|
||||
if i == 0 {
|
||||
return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point")
|
||||
}
|
||||
break
|
||||
}
|
||||
digits++
|
||||
|
||||
const maxFracPrecision = 9
|
||||
if i >= maxFracPrecision {
|
||||
// go-toml allows decoding fractional seconds
|
||||
// beyond the supported precision of 9
|
||||
// digits. It truncates the fractional component
|
||||
// to the supported precision and ignores the
|
||||
// remaining digits.
|
||||
//
|
||||
// https://github.com/pelletier/go-toml/discussions/707
|
||||
continue
|
||||
}
|
||||
|
||||
frac *= 10
|
||||
frac += int(c - '0')
|
||||
precision++
|
||||
}
|
||||
|
||||
if precision == 0 {
|
||||
return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit")
|
||||
}
|
||||
|
||||
t.Nanosecond = frac * nspow[precision]
|
||||
t.Precision = precision
|
||||
|
||||
return t, b[1+digits:], nil
|
||||
}
|
||||
return t, b, nil
|
||||
}
|
||||
|
||||
//nolint:cyclop
|
||||
func parseFloat(b []byte) (float64, error) {
|
||||
if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
|
||||
return math.NaN(), nil
|
||||
}
|
||||
|
||||
cleaned, err := checkAndRemoveUnderscoresFloats(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if cleaned[0] == '.' {
|
||||
return 0, unstable.NewParserError(b, "float cannot start with a dot")
|
||||
}
|
||||
|
||||
if cleaned[len(cleaned)-1] == '.' {
|
||||
return 0, unstable.NewParserError(b, "float cannot end with a dot")
|
||||
}
|
||||
|
||||
dotAlreadySeen := false
|
||||
for i, c := range cleaned {
|
||||
if c == '.' {
|
||||
if dotAlreadySeen {
|
||||
return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point")
|
||||
}
|
||||
if !isDigit(cleaned[i-1]) {
|
||||
return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit")
|
||||
}
|
||||
if !isDigit(cleaned[i+1]) {
|
||||
return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit")
|
||||
}
|
||||
dotAlreadySeen = true
|
||||
}
|
||||
}
|
||||
|
||||
start := 0
|
||||
if cleaned[0] == '+' || cleaned[0] == '-' {
|
||||
start = 1
|
||||
}
|
||||
if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) {
|
||||
return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(string(cleaned), 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "unable to parse float: %w", err)
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func parseIntHex(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 16, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func parseIntOct(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 8, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func parseIntBin(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 2, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func isSign(b byte) bool {
|
||||
return b == '+' || b == '-'
|
||||
}
|
||||
|
||||
func parseIntDec(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
startIdx := 0
|
||||
|
||||
if isSign(cleaned[0]) {
|
||||
startIdx++
|
||||
}
|
||||
|
||||
if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
|
||||
return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number")
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 10, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
|
||||
start := 0
|
||||
if b[start] == '+' || b[start] == '-' {
|
||||
start++
|
||||
}
|
||||
|
||||
if len(b) == start {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
if b[start] == '_' {
|
||||
return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore")
|
||||
}
|
||||
|
||||
if b[len(b)-1] == '_' {
|
||||
return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
|
||||
}
|
||||
|
||||
// fast path
|
||||
i := 0
|
||||
for ; i < len(b); i++ {
|
||||
if b[i] == '_' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(b) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
before := false
|
||||
cleaned := make([]byte, i, len(b))
|
||||
copy(cleaned, b)
|
||||
|
||||
for i++; i < len(b); i++ {
|
||||
c := b[i]
|
||||
if c == '_' {
|
||||
if !before {
|
||||
return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
|
||||
}
|
||||
before = false
|
||||
} else {
|
||||
before = true
|
||||
cleaned = append(cleaned, c)
|
||||
}
|
||||
}
|
||||
|
||||
return cleaned, nil
|
||||
}
|
||||
|
||||
func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
|
||||
if b[0] == '_' {
|
||||
return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore")
|
||||
}
|
||||
|
||||
if b[len(b)-1] == '_' {
|
||||
return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
|
||||
}
|
||||
|
||||
// fast path
|
||||
i := 0
|
||||
for ; i < len(b); i++ {
|
||||
if b[i] == '_' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(b) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
before := false
|
||||
cleaned := make([]byte, 0, len(b))
|
||||
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
switch c {
|
||||
case '_':
|
||||
if !before {
|
||||
return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
|
||||
}
|
||||
if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
|
||||
return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent")
|
||||
}
|
||||
before = false
|
||||
case '+', '-':
|
||||
// signed exponents
|
||||
cleaned = append(cleaned, c)
|
||||
before = false
|
||||
case 'e', 'E':
|
||||
if i < len(b)-1 && b[i+1] == '_' {
|
||||
return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent")
|
||||
}
|
||||
cleaned = append(cleaned, c)
|
||||
case '.':
|
||||
if i < len(b)-1 && b[i+1] == '_' {
|
||||
return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point")
|
||||
}
|
||||
if i > 0 && b[i-1] == '_' {
|
||||
return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point")
|
||||
}
|
||||
cleaned = append(cleaned, c)
|
||||
default:
|
||||
before = true
|
||||
cleaned = append(cleaned, c)
|
||||
}
|
||||
}
|
||||
|
||||
return cleaned, nil
|
||||
}
|
||||
|
||||
// isValidDate checks if a provided date is a date that exists.
|
||||
func isValidDate(year int, month int, day int) bool {
|
||||
return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year)
|
||||
}
|
||||
|
||||
// daysBefore[m] counts the number of days in a non-leap year
|
||||
// before month m begins. There is an entry for m=12, counting
|
||||
// the number of days before January of next year (365).
|
||||
var daysBefore = [...]int32{
|
||||
0,
|
||||
31,
|
||||
31 + 28,
|
||||
31 + 28 + 31,
|
||||
31 + 28 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
|
||||
}
|
||||
|
||||
func daysIn(m int, year int) int {
|
||||
if m == 2 && isLeap(year) {
|
||||
return 29
|
||||
}
|
||||
return int(daysBefore[m] - daysBefore[m-1])
|
||||
}
|
||||
|
||||
func isLeap(year int) bool {
|
||||
return year%4 == 0 && (year%100 != 0 || year%400 == 0)
|
||||
}
|
||||
|
||||
func isDigit(r byte) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
2
vendor/github.com/pelletier/go-toml/v2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,2 @@
// Package toml is a library to read and write TOML documents.
package toml
252
vendor/github.com/pelletier/go-toml/v2/errors.go
generated
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
// DecodeError represents an error encountered during the parsing or decoding
|
||||
// of a TOML document.
|
||||
//
|
||||
// In addition to the error message, it contains the position in the document
|
||||
// where it happened, as well as a human-readable representation that shows
|
||||
// where the error occurred in the document.
|
||||
type DecodeError struct {
|
||||
message string
|
||||
line int
|
||||
column int
|
||||
key Key
|
||||
|
||||
human string
|
||||
}
|
||||
|
||||
// StrictMissingError occurs in a TOML document that does not have a
|
||||
// corresponding field in the target value. It contains all the missing fields
|
||||
// in Errors.
|
||||
//
|
||||
// Emitted by Decoder when DisallowUnknownFields() was called.
|
||||
type StrictMissingError struct {
|
||||
// One error per field that could not be found.
|
||||
Errors []DecodeError
|
||||
}
|
||||
|
||||
// Error returns the canonical string for this error.
|
||||
func (s *StrictMissingError) Error() string {
|
||||
return "strict mode: fields in the document are missing in the target struct"
|
||||
}
|
||||
|
||||
// String returns a human readable description of all errors.
|
||||
func (s *StrictMissingError) String() string {
|
||||
var buf strings.Builder
|
||||
|
||||
for i, e := range s.Errors {
|
||||
if i > 0 {
|
||||
buf.WriteString("\n---\n")
|
||||
}
|
||||
|
||||
buf.WriteString(e.String())
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type Key []string
|
||||
|
||||
// Error returns the error message contained in the DecodeError.
|
||||
func (e *DecodeError) Error() string {
|
||||
return "toml: " + e.message
|
||||
}
|
||||
|
||||
// String returns the human-readable contextualized error. This string is multi-line.
|
||||
func (e *DecodeError) String() string {
|
||||
return e.human
|
||||
}
|
||||
|
||||
// Position returns the (line, column) pair indicating where the error
|
||||
// occurred in the document. Positions are 1-indexed.
|
||||
func (e *DecodeError) Position() (row int, column int) {
|
||||
return e.line, e.column
|
||||
}
|
||||
|
||||
// Key that was being processed when the error occurred. The key is present only
|
||||
// if this DecodeError is part of a StrictMissingError.
|
||||
func (e *DecodeError) Key() Key {
|
||||
return e.key
|
||||
}
|
||||
|
||||
// wrapDecodeError creates a DecodeError referencing a highlighted
|
||||
// range of bytes from document.
|
||||
//
|
||||
// highlight needs to be a sub-slice of document, or this function panics.
|
||||
//
|
||||
// The function copies all bytes used in DecodeError, so that document and
|
||||
// highlight can be freely deallocated.
|
||||
//
|
||||
//nolint:funlen
|
||||
func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
|
||||
offset := danger.SubsliceOffset(document, de.Highlight)
|
||||
|
||||
errMessage := de.Error()
|
||||
errLine, errColumn := positionAtEnd(document[:offset])
|
||||
before, after := linesOfContext(document, de.Highlight, offset, 3)
|
||||
|
||||
var buf strings.Builder
|
||||
|
||||
maxLine := errLine + len(after) - 1
|
||||
lineColumnWidth := len(strconv.Itoa(maxLine))
|
||||
|
||||
// Write the lines of context strictly before the error.
|
||||
for i := len(before) - 1; i > 0; i-- {
|
||||
line := errLine - i
|
||||
buf.WriteString(formatLineNumber(line, lineColumnWidth))
|
||||
buf.WriteString("|")
|
||||
|
||||
if len(before[i]) > 0 {
|
||||
buf.WriteString(" ")
|
||||
buf.Write(before[i])
|
||||
}
|
||||
|
||||
buf.WriteRune('\n')
|
||||
}
|
||||
|
||||
// Write the document line that contains the error.
|
||||
|
||||
buf.WriteString(formatLineNumber(errLine, lineColumnWidth))
|
||||
buf.WriteString("| ")
|
||||
|
||||
if len(before) > 0 {
|
||||
buf.Write(before[0])
|
||||
}
|
||||
|
||||
buf.Write(de.Highlight)
|
||||
|
||||
if len(after) > 0 {
|
||||
buf.Write(after[0])
|
||||
}
|
||||
|
||||
buf.WriteRune('\n')
|
||||
|
||||
// Write the line with the error message itself (so it does not have a line
|
||||
// number).
|
||||
|
||||
buf.WriteString(strings.Repeat(" ", lineColumnWidth))
|
||||
buf.WriteString("| ")
|
||||
|
||||
if len(before) > 0 {
|
||||
buf.WriteString(strings.Repeat(" ", len(before[0])))
|
||||
}
|
||||
|
||||
buf.WriteString(strings.Repeat("~", len(de.Highlight)))
|
||||
|
||||
if len(errMessage) > 0 {
|
||||
buf.WriteString(" ")
|
||||
buf.WriteString(errMessage)
|
||||
}
|
||||
|
||||
// Write the lines of context strictly after the error.
|
||||
|
||||
for i := 1; i < len(after); i++ {
|
||||
buf.WriteRune('\n')
|
||||
line := errLine + i
|
||||
buf.WriteString(formatLineNumber(line, lineColumnWidth))
|
||||
buf.WriteString("|")
|
||||
|
||||
if len(after[i]) > 0 {
|
||||
buf.WriteString(" ")
|
||||
buf.Write(after[i])
|
||||
}
|
||||
}
|
||||
|
||||
return &DecodeError{
|
||||
message: errMessage,
|
||||
line: errLine,
|
||||
column: errColumn,
|
||||
key: de.Key,
|
||||
human: buf.String(),
|
||||
}
|
||||
}
|
||||
|
||||
func formatLineNumber(line int, width int) string {
|
||||
format := "%" + strconv.Itoa(width) + "d"
|
||||
|
||||
return fmt.Sprintf(format, line)
|
||||
}
|
||||
|
||||
func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) {
|
||||
return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround)
|
||||
}
|
||||
|
||||
func beforeLines(document []byte, offset int, linesAround int) [][]byte {
|
||||
var beforeLines [][]byte
|
||||
|
||||
// Walk the document backward from the highlight to find previous lines
|
||||
// of context.
|
||||
rest := document[:offset]
|
||||
backward:
|
||||
for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; {
|
||||
switch {
|
||||
case rest[o] == '\n':
|
||||
// handle individual lines
|
||||
beforeLines = append(beforeLines, rest[o+1:])
|
||||
rest = rest[:o]
|
||||
o = len(rest) - 1
|
||||
case o == 0:
|
||||
// add the first line only if it's non-empty
|
||||
beforeLines = append(beforeLines, rest)
|
||||
|
||||
break backward
|
||||
default:
|
||||
o--
|
||||
}
|
||||
}
|
||||
|
||||
return beforeLines
|
||||
}
|
||||
|
||||
func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte {
|
||||
var afterLines [][]byte
|
||||
|
||||
// Walk the document forward from the highlight to find the following
|
||||
// lines of context.
|
||||
rest := document[offset+len(highlight):]
|
||||
forward:
|
||||
for o := 0; o < len(rest) && len(afterLines) <= linesAround; {
|
||||
switch {
|
||||
case rest[o] == '\n':
|
||||
// handle individual lines
|
||||
afterLines = append(afterLines, rest[:o])
|
||||
rest = rest[o+1:]
|
||||
o = 0
|
||||
|
||||
case o == len(rest)-1:
|
||||
// add last line only if it's non-empty
|
||||
afterLines = append(afterLines, rest)
|
||||
|
||||
break forward
|
||||
default:
|
||||
o++
|
||||
}
|
||||
}
|
||||
|
||||
return afterLines
|
||||
}
|
||||
|
||||
func positionAtEnd(b []byte) (row int, column int) {
|
||||
row = 1
|
||||
column = 1
|
||||
|
||||
for _, c := range b {
|
||||
if c == '\n' {
|
||||
row++
|
||||
column = 1
|
||||
} else {
|
||||
column++
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
42
vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package characters
|
||||
|
||||
var invalidAsciiTable = [256]bool{
|
||||
0x00: true,
|
||||
0x01: true,
|
||||
0x02: true,
|
||||
0x03: true,
|
||||
0x04: true,
|
||||
0x05: true,
|
||||
0x06: true,
|
||||
0x07: true,
|
||||
0x08: true,
|
||||
// 0x09 TAB
|
||||
// 0x0A LF
|
||||
0x0B: true,
|
||||
0x0C: true,
|
||||
// 0x0D CR
|
||||
0x0E: true,
|
||||
0x0F: true,
|
||||
0x10: true,
|
||||
0x11: true,
|
||||
0x12: true,
|
||||
0x13: true,
|
||||
0x14: true,
|
||||
0x15: true,
|
||||
0x16: true,
|
||||
0x17: true,
|
||||
0x18: true,
|
||||
0x19: true,
|
||||
0x1A: true,
|
||||
0x1B: true,
|
||||
0x1C: true,
|
||||
0x1D: true,
|
||||
0x1E: true,
|
||||
0x1F: true,
|
||||
// 0x20 - 0x7E Printable ASCII characters
|
||||
0x7F: true,
|
||||
}
|
||||
|
||||
func InvalidAscii(b byte) bool {
|
||||
return invalidAsciiTable[b]
|
||||
}
|
||||
199
vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
package characters
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type utf8Err struct {
|
||||
Index int
|
||||
Size int
|
||||
}
|
||||
|
||||
func (u utf8Err) Zero() bool {
|
||||
return u.Size == 0
|
||||
}
|
||||
|
||||
// Verifies that a given string is only made of valid UTF-8 characters allowed
|
||||
// by the TOML spec:
|
||||
//
|
||||
// Any Unicode character may be used except those that must be escaped:
|
||||
// quotation mark, backslash, and the control characters other than tab (U+0000
|
||||
// to U+0008, U+000A to U+001F, U+007F).
|
||||
//
|
||||
// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early
|
||||
// when a character is not allowed.
|
||||
//
|
||||
// The returned utf8Err is Zero() if the string is valid, or contains the byte
|
||||
// index and size of the invalid character.
|
||||
//
|
||||
// quotation mark => already checked
|
||||
// backslash => already checked
|
||||
// 0-0x8 => invalid
|
||||
// 0x9 => tab, ok
|
||||
// 0xA - 0x1F => invalid
|
||||
// 0x7F => invalid
|
||||
func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
|
||||
// Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
|
||||
offset := 0
|
||||
for len(p) >= 8 {
|
||||
// Combining two 32 bit loads allows the same code to be used
|
||||
// for 32 and 64 bit platforms.
|
||||
// The compiler can generate a 32bit load for first32 and second32
|
||||
// on many platforms. See test/codegen/memcombine.go.
|
||||
first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
|
||||
second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
|
||||
if (first32|second32)&0x80808080 != 0 {
|
||||
// Found a non ASCII byte (>= RuneSelf).
|
||||
break
|
||||
}
|
||||
|
||||
for i, b := range p[:8] {
|
||||
if InvalidAscii(b) {
|
||||
err.Index = offset + i
|
||||
err.Size = 1
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p = p[8:]
|
||||
offset += 8
|
||||
}
|
||||
n := len(p)
|
||||
for i := 0; i < n; {
|
||||
pi := p[i]
|
||||
if pi < utf8.RuneSelf {
|
||||
if InvalidAscii(pi) {
|
||||
err.Index = offset + i
|
||||
err.Size = 1
|
||||
return
|
||||
}
|
||||
i++
|
||||
continue
|
||||
}
|
||||
x := first[pi]
|
||||
if x == xx {
|
||||
// Illegal starter byte.
|
||||
err.Index = offset + i
|
||||
err.Size = 1
|
||||
return
|
||||
}
|
||||
size := int(x & 7)
|
||||
if i+size > n {
|
||||
// Short or invalid.
|
||||
err.Index = offset + i
|
||||
err.Size = n - i
|
||||
return
|
||||
}
|
||||
accept := acceptRanges[x>>4]
|
||||
if c := p[i+1]; c < accept.lo || accept.hi < c {
|
||||
err.Index = offset + i
|
||||
err.Size = 2
|
||||
return
|
||||
} else if size == 2 {
|
||||
} else if c := p[i+2]; c < locb || hicb < c {
|
||||
err.Index = offset + i
|
||||
err.Size = 3
|
||||
return
|
||||
} else if size == 3 {
|
||||
} else if c := p[i+3]; c < locb || hicb < c {
|
||||
err.Index = offset + i
|
||||
err.Size = 4
|
||||
return
|
||||
}
|
||||
i += size
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Return the size of the next rune if valid, 0 otherwise.
|
||||
func Utf8ValidNext(p []byte) int {
|
||||
c := p[0]
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
if InvalidAscii(c) {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
x := first[c]
|
||||
if x == xx {
|
||||
// Illegal starter byte.
|
||||
return 0
|
||||
}
|
||||
size := int(x & 7)
|
||||
if size > len(p) {
|
||||
// Short or invalid.
|
||||
return 0
|
||||
}
|
||||
accept := acceptRanges[x>>4]
|
||||
if c := p[1]; c < accept.lo || accept.hi < c {
|
||||
return 0
|
||||
} else if size == 2 {
|
||||
} else if c := p[2]; c < locb || hicb < c {
|
||||
return 0
|
||||
} else if size == 3 {
|
||||
} else if c := p[3]; c < locb || hicb < c {
|
||||
return 0
|
||||
}
|
||||
|
||||
return size
|
||||
}
|
||||
|
||||
// acceptRange gives the range of valid values for the second byte in a UTF-8
|
||||
// sequence.
|
||||
type acceptRange struct {
|
||||
lo uint8 // lowest value for second byte.
|
||||
hi uint8 // highest value for second byte.
|
||||
}
|
||||
|
||||
// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
|
||||
var acceptRanges = [16]acceptRange{
|
||||
0: {locb, hicb},
|
||||
1: {0xA0, hicb},
|
||||
2: {locb, 0x9F},
|
||||
3: {0x90, hicb},
|
||||
4: {locb, 0x8F},
|
||||
}
|
||||
|
||||
// first is information about the first byte in a UTF-8 sequence.
|
||||
var first = [256]uint8{
|
||||
// 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
|
||||
// 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
|
||||
xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
|
||||
s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
|
||||
s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
|
||||
s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
|
||||
}
|
||||
|
||||
const (
|
||||
// The default lowest and highest continuation byte.
|
||||
locb = 0b10000000
|
||||
hicb = 0b10111111
|
||||
|
||||
// These names of these constants are chosen to give nice alignment in the
|
||||
// table below. The first nibble is an index into acceptRanges or F for
|
||||
// special one-byte cases. The second nibble is the Rune length or the
|
||||
// Status for the special one-byte case.
|
||||
xx = 0xF1 // invalid: size 1
|
||||
as = 0xF0 // ASCII: size 1
|
||||
s1 = 0x02 // accept 0, size 2
|
||||
s2 = 0x13 // accept 1, size 3
|
||||
s3 = 0x03 // accept 0, size 3
|
||||
s4 = 0x23 // accept 2, size 3
|
||||
s5 = 0x34 // accept 3, size 4
|
||||
s6 = 0x04 // accept 0, size 4
|
||||
s7 = 0x44 // accept 4, size 4
|
||||
)
|
||||
65
vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package danger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const maxInt = uintptr(int(^uint(0) >> 1))
|
||||
|
||||
func SubsliceOffset(data []byte, subslice []byte) int {
|
||||
datap := (*reflect.SliceHeader)(unsafe.Pointer(&data))
|
||||
hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice))
|
||||
|
||||
if hlp.Data < datap.Data {
|
||||
panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data))
|
||||
}
|
||||
offset := hlp.Data - datap.Data
|
||||
|
||||
if offset > maxInt {
|
||||
panic(fmt.Errorf("slice offset larger than int (%d)", offset))
|
||||
}
|
||||
|
||||
intoffset := int(offset)
|
||||
|
||||
if intoffset > datap.Len {
|
||||
panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len))
|
||||
}
|
||||
|
||||
if intoffset+hlp.Len > datap.Len {
|
||||
panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len))
|
||||
}
|
||||
|
||||
return intoffset
|
||||
}
|
||||
|
||||
func BytesRange(start []byte, end []byte) []byte {
|
||||
if start == nil || end == nil {
|
||||
panic("cannot call BytesRange with nil")
|
||||
}
|
||||
startp := (*reflect.SliceHeader)(unsafe.Pointer(&start))
|
||||
endp := (*reflect.SliceHeader)(unsafe.Pointer(&end))
|
||||
|
||||
if startp.Data > endp.Data {
|
||||
panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data))
|
||||
}
|
||||
|
||||
l := startp.Len
|
||||
endLen := int(endp.Data-startp.Data) + endp.Len
|
||||
if endLen > l {
|
||||
l = endLen
|
||||
}
|
||||
|
||||
if l > startp.Cap {
|
||||
panic(fmt.Errorf("range length is larger than capacity"))
|
||||
}
|
||||
|
||||
return start[:l]
|
||||
}
|
||||
|
||||
func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
|
||||
// TODO: replace with unsafe.Add when Go 1.17 is released
|
||||
// https://github.com/golang/go/issues/40481
|
||||
return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
|
||||
}
|
||||
23
vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package danger
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// typeID is used as key in encoder and decoder caches to enable using
|
||||
// the optimized runtime.mapaccess2_fast64 function instead of the more
|
||||
// expensive lookup if we were to use reflect.Type as map key.
|
||||
//
|
||||
// typeID holds the pointer to the reflect.Type value, which is unique
|
||||
// in the program.
|
||||
//
|
||||
// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
|
||||
type TypeID unsafe.Pointer
|
||||
|
||||
func MakeTypeID(t reflect.Type) TypeID {
|
||||
// reflect.Type has the fields:
|
||||
// typ unsafe.Pointer
|
||||
// ptr unsafe.Pointer
|
||||
return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
|
||||
}
|
||||
48
vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package tracker
|
||||
|
||||
import "github.com/pelletier/go-toml/v2/unstable"
|
||||
|
||||
// KeyTracker is a tracker that keeps track of the current Key as the AST is
|
||||
// walked.
|
||||
type KeyTracker struct {
|
||||
k []string
|
||||
}
|
||||
|
||||
// UpdateTable sets the state of the tracker with the AST table node.
|
||||
func (t *KeyTracker) UpdateTable(node *unstable.Node) {
|
||||
t.reset()
|
||||
t.Push(node)
|
||||
}
|
||||
|
||||
// UpdateArrayTable sets the state of the tracker with the AST array table node.
|
||||
func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) {
|
||||
t.reset()
|
||||
t.Push(node)
|
||||
}
|
||||
|
||||
// Push the given key on the stack.
|
||||
func (t *KeyTracker) Push(node *unstable.Node) {
|
||||
it := node.Key()
|
||||
for it.Next() {
|
||||
t.k = append(t.k, string(it.Node().Data))
|
||||
}
|
||||
}
|
||||
|
||||
// Pop key from stack.
|
||||
func (t *KeyTracker) Pop(node *unstable.Node) {
|
||||
it := node.Key()
|
||||
for it.Next() {
|
||||
t.k = t.k[:len(t.k)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// Key returns the current key
|
||||
func (t *KeyTracker) Key() []string {
|
||||
k := make([]string, len(t.k))
|
||||
copy(k, t.k)
|
||||
return k
|
||||
}
|
||||
|
||||
func (t *KeyTracker) reset() {
|
||||
t.k = t.k[:0]
|
||||
}
|
||||
358
vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
generated
vendored
Normal file
@@ -0,0 +1,358 @@
|
||||
package tracker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
type keyKind uint8
|
||||
|
||||
const (
|
||||
invalidKind keyKind = iota
|
||||
valueKind
|
||||
tableKind
|
||||
arrayTableKind
|
||||
)
|
||||
|
||||
func (k keyKind) String() string {
|
||||
switch k {
|
||||
case invalidKind:
|
||||
return "invalid"
|
||||
case valueKind:
|
||||
return "value"
|
||||
case tableKind:
|
||||
return "table"
|
||||
case arrayTableKind:
|
||||
return "array table"
|
||||
}
|
||||
panic("missing keyKind string mapping")
|
||||
}
|
||||
|
||||
// SeenTracker tracks which keys have been seen with which TOML type to flag
|
||||
// duplicates and mismatches according to the spec.
|
||||
//
|
||||
// Each node in the visited tree is represented by an entry. Each entry has an
|
||||
// identifier, which is provided by a counter. Entries are stored in the array
|
||||
// entries. As new nodes are discovered (referenced for the first time in the
|
||||
// TOML document), entries are created and appended to the array. An entry
|
||||
// points to its parent using its id.
|
||||
//
|
||||
// To find whether a given key (sequence of []byte) has already been visited,
|
||||
// the entries are linearly searched, looking for one with the right name and
|
||||
// parent id.
|
||||
//
|
||||
// Given that all keys appear in the document after their parent, it is
|
||||
// guaranteed that all descendants of a node are stored after the node; this
|
||||
// speeds up the search process.
|
||||
//
|
||||
// When encountering [[array tables]], the descendants of that node are removed
|
||||
// to allow that branch of the tree to be "rediscovered". To maintain the
|
||||
// invariant above, the deletion process needs to keep the order of entries.
|
||||
// This results in more copies in that case.
|
||||
type SeenTracker struct {
|
||||
entries []entry
|
||||
currentIdx int
|
||||
}
|
||||
|
||||
var pool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &SeenTracker{}
|
||||
},
|
||||
}
|
||||
|
||||
func (s *SeenTracker) reset() {
|
||||
// Always contains a root element at index 0.
|
||||
s.currentIdx = 0
|
||||
if len(s.entries) == 0 {
|
||||
s.entries = make([]entry, 1, 2)
|
||||
} else {
|
||||
s.entries = s.entries[:1]
|
||||
}
|
||||
s.entries[0].child = -1
|
||||
s.entries[0].next = -1
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
// Use -1 to indicate no child or no sibling.
|
||||
child int
|
||||
next int
|
||||
|
||||
name []byte
|
||||
kind keyKind
|
||||
explicit bool
|
||||
kv bool
|
||||
}
|
||||
|
||||
// Find the index of the child of parentIdx with key k. Returns -1 if
|
||||
// it does not exist.
|
||||
func (s *SeenTracker) find(parentIdx int, k []byte) int {
|
||||
for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
|
||||
if bytes.Equal(s.entries[i].name, k) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Remove all descendants of node at position idx.
|
||||
func (s *SeenTracker) clear(idx int) {
|
||||
if idx >= len(s.entries) {
|
||||
return
|
||||
}
|
||||
|
||||
for i := s.entries[idx].child; i >= 0; {
|
||||
next := s.entries[i].next
|
||||
n := s.entries[0].next
|
||||
s.entries[0].next = i
|
||||
s.entries[i].next = n
|
||||
s.entries[i].name = nil
|
||||
s.clear(i)
|
||||
i = next
|
||||
}
|
||||
|
||||
s.entries[idx].child = -1
|
||||
}
|
||||
|
||||
func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int {
|
||||
e := entry{
|
||||
child: -1,
|
||||
next: s.entries[parentIdx].child,
|
||||
|
||||
name: name,
|
||||
kind: kind,
|
||||
explicit: explicit,
|
||||
kv: kv,
|
||||
}
|
||||
var idx int
|
||||
if s.entries[0].next >= 0 {
|
||||
idx = s.entries[0].next
|
||||
s.entries[0].next = s.entries[idx].next
|
||||
s.entries[idx] = e
|
||||
} else {
|
||||
idx = len(s.entries)
|
||||
s.entries = append(s.entries, e)
|
||||
}
|
||||
|
||||
s.entries[parentIdx].child = idx
|
||||
|
||||
return idx
|
||||
}
|
||||
|
||||
func (s *SeenTracker) setExplicitFlag(parentIdx int) {
|
||||
for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
|
||||
if s.entries[i].kv {
|
||||
s.entries[i].explicit = true
|
||||
s.entries[i].kv = false
|
||||
}
|
||||
s.setExplicitFlag(i)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckExpression takes a top-level node and checks that it does not contain
|
||||
// keys that have been seen in previous calls, and validates that types are
|
||||
// consistent. It returns true if it is the first time this node's key is seen.
|
||||
// Useful to clear array tables on first use.
|
||||
func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) {
|
||||
if s.entries == nil {
|
||||
s.reset()
|
||||
}
|
||||
switch node.Kind {
|
||||
case unstable.KeyValue:
|
||||
return s.checkKeyValue(node)
|
||||
case unstable.Table:
|
||||
return s.checkTable(node)
|
||||
case unstable.ArrayTable:
|
||||
return s.checkArrayTable(node)
|
||||
default:
|
||||
panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) {
|
||||
if s.currentIdx >= 0 {
|
||||
s.setExplicitFlag(s.currentIdx)
|
||||
}
|
||||
|
||||
it := node.Key()
|
||||
|
||||
parentIdx := 0
|
||||
|
||||
// This code is duplicated in checkArrayTable. This is because factoring
|
||||
// it in a function requires to copy the iterator, or allocate it to the
|
||||
// heap, which is not cheap.
|
||||
for it.Next() {
|
||||
if it.IsLast() {
|
||||
break
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
if idx < 0 {
|
||||
idx = s.create(parentIdx, k, tableKind, false, false)
|
||||
} else {
|
||||
entry := s.entries[idx]
|
||||
if entry.kind == valueKind {
|
||||
return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
|
||||
}
|
||||
}
|
||||
parentIdx = idx
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
first := false
|
||||
if idx >= 0 {
|
||||
kind := s.entries[idx].kind
|
||||
if kind != tableKind {
|
||||
return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind)
|
||||
}
|
||||
if s.entries[idx].explicit {
|
||||
return false, fmt.Errorf("toml: table %s already exists", string(k))
|
||||
}
|
||||
s.entries[idx].explicit = true
|
||||
} else {
|
||||
idx = s.create(parentIdx, k, tableKind, true, false)
|
||||
first = true
|
||||
}
|
||||
|
||||
s.currentIdx = idx
|
||||
|
||||
return first, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) {
|
||||
if s.currentIdx >= 0 {
|
||||
s.setExplicitFlag(s.currentIdx)
|
||||
}
|
||||
|
||||
it := node.Key()
|
||||
|
||||
parentIdx := 0
|
||||
|
||||
for it.Next() {
|
||||
if it.IsLast() {
|
||||
break
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
if idx < 0 {
|
||||
idx = s.create(parentIdx, k, tableKind, false, false)
|
||||
} else {
|
||||
entry := s.entries[idx]
|
||||
if entry.kind == valueKind {
|
||||
return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
|
||||
}
|
||||
}
|
||||
|
||||
parentIdx = idx
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
firstTime := idx < 0
|
||||
if firstTime {
|
||||
idx = s.create(parentIdx, k, arrayTableKind, true, false)
|
||||
} else {
|
||||
kind := s.entries[idx].kind
|
||||
if kind != arrayTableKind {
|
||||
return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k))
|
||||
}
|
||||
s.clear(idx)
|
||||
}
|
||||
|
||||
s.currentIdx = idx
|
||||
|
||||
return firstTime, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) {
|
||||
parentIdx := s.currentIdx
|
||||
it := node.Key()
|
||||
|
||||
for it.Next() {
|
||||
k := it.Node().Data
|
||||
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
if idx < 0 {
|
||||
idx = s.create(parentIdx, k, tableKind, false, true)
|
||||
} else {
|
||||
entry := s.entries[idx]
|
||||
if it.IsLast() {
|
||||
return false, fmt.Errorf("toml: key %s is already defined", string(k))
|
||||
} else if entry.kind != tableKind {
|
||||
return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
|
||||
} else if entry.explicit {
|
||||
return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k))
|
||||
}
|
||||
}
|
||||
|
||||
parentIdx = idx
|
||||
}
|
||||
|
||||
s.entries[parentIdx].kind = valueKind
|
||||
|
||||
value := node.Value()
|
||||
|
||||
switch value.Kind {
|
||||
case unstable.InlineTable:
|
||||
return s.checkInlineTable(value)
|
||||
case unstable.Array:
|
||||
return s.checkArray(value)
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) {
|
||||
it := node.Children()
|
||||
for it.Next() {
|
||||
n := it.Node()
|
||||
switch n.Kind {
|
||||
case unstable.InlineTable:
|
||||
first, err = s.checkInlineTable(n)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
case unstable.Array:
|
||||
first, err = s.checkArray(n)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return first, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) {
|
||||
s = pool.Get().(*SeenTracker)
|
||||
s.reset()
|
||||
|
||||
it := node.Children()
|
||||
for it.Next() {
|
||||
n := it.Node()
|
||||
first, err = s.checkKeyValue(n)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// As inline tables are self-contained, the tracker does not
|
||||
// need to retain the details of what they contain. The
|
||||
// keyValue element that creates the inline table is kept to
|
||||
// mark the presence of the inline table and prevent
|
||||
// redefinition of its keys: check* functions cannot walk into
|
||||
// a value.
|
||||
pool.Put(s)
|
||||
return first, nil
|
||||
}
|
||||
1
vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
generated
vendored
Normal file
@@ -0,0 +1 @@
package tracker
122
vendor/github.com/pelletier/go-toml/v2/localtime.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
package toml

import (
	"fmt"
	"strings"
	"time"

	"github.com/pelletier/go-toml/v2/unstable"
)

// LocalDate represents a calendar day in no specific timezone.
type LocalDate struct {
	Year  int
	Month int
	Day   int
}

// AsTime converts d into a specific time instance at midnight in zone.
func (d LocalDate) AsTime(zone *time.Location) time.Time {
	return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone)
}

// String returns RFC 3339 representation of d.
func (d LocalDate) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// MarshalText returns RFC 3339 representation of d.
func (d LocalDate) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalDate) UnmarshalText(b []byte) error {
	res, err := parseLocalDate(b)
	if err != nil {
		return err
	}
	*d = res
	return nil
}

// LocalTime represents a time of day of no specific day in no specific
// timezone.
type LocalTime struct {
	Hour       int // Hour of the day: [0; 24[
	Minute     int // Minute of the hour: [0; 60[
	Second     int // Second of the minute: [0; 60[
	Nanosecond int // Nanoseconds within the second: [0, 1000000000[
	Precision  int // Number of digits to display for Nanosecond.
}

// String returns RFC 3339 representation of d.
// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond
// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number
// of digits for nanoseconds is provided.
func (d LocalTime) String() string {
	s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second)

	if d.Precision > 0 {
		s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1]
	} else if d.Nanosecond > 0 {
		// Nanoseconds are specified, but precision is not provided. Use the
		// minimum.
		s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0")
	}

	return s
}

// MarshalText returns RFC 3339 representation of d.
func (d LocalTime) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalTime) UnmarshalText(b []byte) error {
	res, left, err := parseLocalTime(b)
	if err == nil && len(left) != 0 {
		err = unstable.NewParserError(left, "extra characters")
	}
	if err != nil {
		return err
	}
	*d = res
	return nil
}

// LocalDateTime represents a time of a specific day in no specific timezone.
type LocalDateTime struct {
	LocalDate
	LocalTime
}

// AsTime converts d into a specific time instance in zone.
func (d LocalDateTime) AsTime(zone *time.Location) time.Time {
	return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone)
}

// String returns RFC 3339 representation of d.
func (d LocalDateTime) String() string {
	return d.LocalDate.String() + "T" + d.LocalTime.String()
}

// MarshalText returns RFC 3339 representation of d.
func (d LocalDateTime) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalDateTime) UnmarshalText(data []byte) error {
	res, left, err := parseLocalDateTime(data)
	if err == nil && len(left) != 0 {
		err = unstable.NewParserError(left, "extra characters")
	}
	if err != nil {
		return err
	}

	*d = res
	return nil
}
1133
vendor/github.com/pelletier/go-toml/v2/marshaler.go
generated
vendored
Normal file
1133
vendor/github.com/pelletier/go-toml/v2/marshaler.go
generated
vendored
Normal file
File diff suppressed because it is too large
107
vendor/github.com/pelletier/go-toml/v2/strict.go
generated
vendored
Normal file
107
vendor/github.com/pelletier/go-toml/v2/strict.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
package toml

import (
	"github.com/pelletier/go-toml/v2/internal/danger"
	"github.com/pelletier/go-toml/v2/internal/tracker"
	"github.com/pelletier/go-toml/v2/unstable"
)

type strict struct {
	Enabled bool

	// Tracks the current key being processed.
	key tracker.KeyTracker

	missing []unstable.ParserError
}

func (s *strict) EnterTable(node *unstable.Node) {
	if !s.Enabled {
		return
	}

	s.key.UpdateTable(node)
}

func (s *strict) EnterArrayTable(node *unstable.Node) {
	if !s.Enabled {
		return
	}

	s.key.UpdateArrayTable(node)
}

func (s *strict) EnterKeyValue(node *unstable.Node) {
	if !s.Enabled {
		return
	}

	s.key.Push(node)
}

func (s *strict) ExitKeyValue(node *unstable.Node) {
	if !s.Enabled {
		return
	}

	s.key.Pop(node)
}

func (s *strict) MissingTable(node *unstable.Node) {
	if !s.Enabled {
		return
	}

	s.missing = append(s.missing, unstable.ParserError{
		Highlight: keyLocation(node),
		Message:   "missing table",
		Key:       s.key.Key(),
	})
}

func (s *strict) MissingField(node *unstable.Node) {
	if !s.Enabled {
		return
	}

	s.missing = append(s.missing, unstable.ParserError{
		Highlight: keyLocation(node),
		Message:   "missing field",
		Key:       s.key.Key(),
	})
}

func (s *strict) Error(doc []byte) error {
	if !s.Enabled || len(s.missing) == 0 {
		return nil
	}

	err := &StrictMissingError{
		Errors: make([]DecodeError, 0, len(s.missing)),
	}

	for _, derr := range s.missing {
		derr := derr
		err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr))
	}

	return err
}

func keyLocation(node *unstable.Node) []byte {
	k := node.Key()

	hasOne := k.Next()
	if !hasOne {
		panic("should not be called with empty key")
	}

	start := k.Node().Data
	end := k.Node().Data

	for k.Next() {
		end = k.Node().Data
	}

	return danger.BytesRange(start, end)
}
243
vendor/github.com/pelletier/go-toml/v2/toml.abnf
generated
vendored
Normal file
243
vendor/github.com/pelletier/go-toml/v2/toml.abnf
generated
vendored
Normal file
@@ -0,0 +1,243 @@
;; This document describes TOML's syntax, using the ABNF format (defined in
;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt).
;;
;; All valid TOML documents will match this description, however certain
;; invalid documents would need to be rejected as per the semantics described
;; in the supporting text description.

;; It is possible to try this grammar interactively, using instaparse.
;; http://instaparse.mojombo.com/
;;
;; To do so, in the lower right, click on Options and change `:input-format` to
;; ':abnf'. Then paste this entire ABNF document into the grammar entry box
;; (above the options). Then you can type or paste a sample TOML document into
;; the beige box on the left. Tada!

;; Overall Structure

toml = expression *( newline expression )

expression = ws [ comment ]
expression =/ ws keyval ws [ comment ]
expression =/ ws table ws [ comment ]

;; Whitespace

ws = *wschar
wschar = %x20 ; Space
wschar =/ %x09 ; Horizontal tab

;; Newline

newline = %x0A ; LF
newline =/ %x0D.0A ; CRLF

;; Comment

comment-start-symbol = %x23 ; #
non-ascii = %x80-D7FF / %xE000-10FFFF
non-eol = %x09 / %x20-7F / non-ascii

comment = comment-start-symbol *non-eol

;; Key-Value pairs

keyval = key keyval-sep val

key = simple-key / dotted-key
simple-key = quoted-key / unquoted-key

unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
quoted-key = basic-string / literal-string
dotted-key = simple-key 1*( dot-sep simple-key )

dot-sep = ws %x2E ws ; . Period
keyval-sep = ws %x3D ws ; =

val = string / boolean / array / inline-table / date-time / float / integer

;; String

string = ml-basic-string / basic-string / ml-literal-string / literal-string

;; Basic String

basic-string = quotation-mark *basic-char quotation-mark

quotation-mark = %x22 ; "

basic-char = basic-unescaped / escaped
basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
escaped = escape escape-seq-char

escape = %x5C ; \
escape-seq-char = %x22 ; " quotation mark U+0022
escape-seq-char =/ %x5C ; \ reverse solidus U+005C
escape-seq-char =/ %x62 ; b backspace U+0008
escape-seq-char =/ %x66 ; f form feed U+000C
escape-seq-char =/ %x6E ; n line feed U+000A
escape-seq-char =/ %x72 ; r carriage return U+000D
escape-seq-char =/ %x74 ; t tab U+0009
escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX

;; Multiline Basic String

ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
                  ml-basic-string-delim
ml-basic-string-delim = 3quotation-mark
ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]

mlb-content = mlb-char / newline / mlb-escaped-nl
mlb-char = mlb-unescaped / escaped
mlb-quotes = 1*2quotation-mark
mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
mlb-escaped-nl = escape ws newline *( wschar / newline )

;; Literal String

literal-string = apostrophe *literal-char apostrophe

apostrophe = %x27 ; ' apostrophe

literal-char = %x09 / %x20-26 / %x28-7E / non-ascii

;; Multiline Literal String

ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
                    ml-literal-string-delim
ml-literal-string-delim = 3apostrophe
ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]

mll-content = mll-char / newline
mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
mll-quotes = 1*2apostrophe

;; Integer

integer = dec-int / hex-int / oct-int / bin-int

minus = %x2D ; -
plus = %x2B ; +
underscore = %x5F ; _
digit1-9 = %x31-39 ; 1-9
digit0-7 = %x30-37 ; 0-7
digit0-1 = %x30-31 ; 0-1

hex-prefix = %x30.78 ; 0x
oct-prefix = %x30.6F ; 0o
bin-prefix = %x30.62 ; 0b

dec-int = [ minus / plus ] unsigned-dec-int
unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT )

hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG )
oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 )
bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 )

;; Float

float = float-int-part ( exp / frac [ exp ] )
float =/ special-float

float-int-part = dec-int
frac = decimal-point zero-prefixable-int
decimal-point = %x2E ; .
zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT )

exp = "e" float-exp-part
float-exp-part = [ minus / plus ] zero-prefixable-int

special-float = [ minus / plus ] ( inf / nan )
inf = %x69.6e.66 ; inf
nan = %x6e.61.6e ; nan

;; Boolean

boolean = true / false

true = %x74.72.75.65 ; true
false = %x66.61.6C.73.65 ; false

;; Date and Time (as defined in RFC 3339)

date-time = offset-date-time / local-date-time / local-date / local-time

date-fullyear = 4DIGIT
date-month = 2DIGIT ; 01-12
date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
time-delim = "T" / %x20 ; T, t, or space
time-hour = 2DIGIT ; 00-23
time-minute = 2DIGIT ; 00-59
time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules
time-secfrac = "." 1*DIGIT
time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
time-offset = "Z" / time-numoffset

partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ]
full-date = date-fullyear "-" date-month "-" date-mday
full-time = partial-time time-offset

;; Offset Date-Time

offset-date-time = full-date time-delim full-time

;; Local Date-Time

local-date-time = full-date time-delim partial-time

;; Local Date

local-date = full-date

;; Local Time

local-time = partial-time

;; Array

array = array-open [ array-values ] ws-comment-newline array-close

array-open = %x5B ; [
array-close = %x5D ; ]

array-values = ws-comment-newline val ws-comment-newline array-sep array-values
array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ]

array-sep = %x2C ; , Comma

ws-comment-newline = *( wschar / [ comment ] newline )

;; Table

table = std-table / array-table

;; Standard Table

std-table = std-table-open key std-table-close

std-table-open = %x5B ws ; [ Left square bracket
std-table-close = ws %x5D ; ] Right square bracket

;; Inline Table

inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close

inline-table-open = %x7B ws ; {
inline-table-close = ws %x7D ; }
inline-table-sep = ws %x2C ws ; , Comma

inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]

;; Array Table

array-table = array-table-open key array-table-close

array-table-open = %x5B.5B ws ; [[ Double left square bracket
array-table-close = ws %x5D.5D ; ]] Double right square bracket

;; Built-in ABNF terms, reproduced here for clarity

ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
DIGIT = %x30-39 ; 0-9
HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
14
vendor/github.com/pelletier/go-toml/v2/types.go
generated
vendored
Normal file
14
vendor/github.com/pelletier/go-toml/v2/types.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package toml

import (
	"encoding"
	"reflect"
	"time"
)

var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil))
var sliceInterfaceType = reflect.TypeOf([]interface{}(nil))
var stringType = reflect.TypeOf("")
1334
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
generated
vendored
Normal file
1334
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
generated
vendored
Normal file
File diff suppressed because it is too large
136
vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
generated
vendored
Normal file
136
vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
package unstable

import (
	"fmt"
	"unsafe"

	"github.com/pelletier/go-toml/v2/internal/danger"
)

// Iterator over a sequence of nodes.
//
// Starts uninitialized, you need to call Next() first.
//
// For example:
//
//	it := n.Children()
//	for it.Next() {
//		n := it.Node()
//		// do something with n
//	}
type Iterator struct {
	started bool
	node    *Node
}

// Next moves the iterator forward and returns true if points to a
// node, false otherwise.
func (c *Iterator) Next() bool {
	if !c.started {
		c.started = true
	} else if c.node.Valid() {
		c.node = c.node.Next()
	}
	return c.node.Valid()
}

// IsLast returns true if the current node of the iterator is the last
// one. Subsequent calls to Next() will return false.
func (c *Iterator) IsLast() bool {
	return c.node.next == 0
}

// Node returns a pointer to the node pointed at by the iterator.
func (c *Iterator) Node() *Node {
	return c.node
}

// Node in a TOML expression AST.
//
// Depending on Kind, its sequence of children should be interpreted
// differently.
//
// - Array have one child per element in the array.
// - InlineTable have one child per key-value in the table (each of kind
//   InlineTable).
// - KeyValue have at least two children. The first one is the value. The rest
//   make a potentially dotted key.
// - Table and ArrayTable's children represent a dotted key (same as
//   KeyValue, but without the first node being the value).
//
// When relevant, Raw describes the range of bytes this node is referring to in
// the input document. Use Parser.Raw() to retrieve the actual bytes.
type Node struct {
	Kind Kind
	Raw  Range  // Raw bytes from the input.
	Data []byte // Node value (either allocated or referencing the input).

	// References to other nodes, as offsets in the backing array
	// from this node. References can go backward, so those can be
	// negative.
	next  int // 0 if last element
	child int // 0 if no child
}

// Range of bytes in the document.
type Range struct {
	Offset uint32
	Length uint32
}

// Next returns a pointer to the next node, or nil if there is no next node.
func (n *Node) Next() *Node {
	if n.next == 0 {
		return nil
	}
	ptr := unsafe.Pointer(n)
	size := unsafe.Sizeof(Node{})
	return (*Node)(danger.Stride(ptr, size, n.next))
}

// Child returns a pointer to the first child node of this node. Other children
// can be accessed calling Next on the first child. Returns an nil if this Node
// has no child.
func (n *Node) Child() *Node {
	if n.child == 0 {
		return nil
	}
	ptr := unsafe.Pointer(n)
	size := unsafe.Sizeof(Node{})
	return (*Node)(danger.Stride(ptr, size, n.child))
}

// Valid returns true if the node's kind is set (not to Invalid).
func (n *Node) Valid() bool {
	return n != nil
}

// Key returns the children nodes making the Key on a supported node. Panics
// otherwise. They are guaranteed to be all be of the Kind Key. A simple key
// would return just one element.
func (n *Node) Key() Iterator {
	switch n.Kind {
	case KeyValue:
		value := n.Child()
		if !value.Valid() {
			panic(fmt.Errorf("KeyValue should have at least two children"))
		}
		return Iterator{node: value.Next()}
	case Table, ArrayTable:
		return Iterator{node: n.Child()}
	default:
		panic(fmt.Errorf("Key() is not supported on a %s", n.Kind))
	}
}

// Value returns a pointer to the value node of a KeyValue.
// Guaranteed to be non-nil. Panics if not called on a KeyValue node,
// or if the Children are malformed.
func (n *Node) Value() *Node {
	return n.Child()
}

// Children returns an iterator over a node's children.
func (n *Node) Children() Iterator {
	return Iterator{node: n.Child()}
}
71
vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
generated
vendored
Normal file
71
vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
package unstable

// root contains a full AST.
//
// It is immutable once constructed with Builder.
type root struct {
	nodes []Node
}

// Iterator over the top level nodes.
func (r *root) Iterator() Iterator {
	it := Iterator{}
	if len(r.nodes) > 0 {
		it.node = &r.nodes[0]
	}
	return it
}

func (r *root) at(idx reference) *Node {
	return &r.nodes[idx]
}

type reference int

const invalidReference reference = -1

func (r reference) Valid() bool {
	return r != invalidReference
}

type builder struct {
	tree    root
	lastIdx int
}

func (b *builder) Tree() *root {
	return &b.tree
}

func (b *builder) NodeAt(ref reference) *Node {
	return b.tree.at(ref)
}

func (b *builder) Reset() {
	b.tree.nodes = b.tree.nodes[:0]
	b.lastIdx = 0
}

func (b *builder) Push(n Node) reference {
	b.lastIdx = len(b.tree.nodes)
	b.tree.nodes = append(b.tree.nodes, n)
	return reference(b.lastIdx)
}

func (b *builder) PushAndChain(n Node) reference {
	newIdx := len(b.tree.nodes)
	b.tree.nodes = append(b.tree.nodes, n)
	if b.lastIdx >= 0 {
		b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
	}
	b.lastIdx = newIdx
	return reference(b.lastIdx)
}

func (b *builder) AttachChild(parent reference, child reference) {
	b.tree.nodes[parent].child = int(child) - int(parent)
}

func (b *builder) Chain(from reference, to reference) {
	b.tree.nodes[from].next = int(to) - int(from)
}
Some files were not shown because too many files have changed in this diff.