mirror of
https://github.com/akyaiy/GoSally-mvp.git
synced 2026-01-03 08:32:24 +00:00
Compare commits
194 Commits
ec94df5f4a
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| d4413c433f | |||
| d9a4bb7871 | |||
| 8e017af3ed | |||
| ef6023330d | |||
| 5474b22fc8 | |||
| 6cd678d9f1 | |||
| 856d3b418c | |||
| 5734ca7a67 | |||
| 608c5aed4a | |||
|
|
d4d04115f3 | ||
|
|
4b916f4fc9 | ||
| 54cc496c39 | |||
| f7b0014a37 | |||
| 54eb5eec6a | |||
| 6cc24a1e7f | |||
| ea41c435dd | |||
| d24e1a94ae | |||
| 846dc06601 | |||
| 740fbbff78 | |||
| 40be3c8d09 | |||
| 9c140abc6d | |||
| e90233aec4 | |||
| df1ef57769 | |||
| 4c840c40bb | |||
| 57f35e8f33 | |||
| f0c591f325 | |||
| 36ee320c45 | |||
| ee6fd205d5 | |||
| bed0471cc4 | |||
| e3812a18a6 | |||
| b7d939d5d7 | |||
| c737e80b8f | |||
| 5783a756c3 | |||
| ba47ee4219 | |||
| 5d49e0afc7 | |||
| 76fed578ff | |||
| 975c52b58e | |||
| 4e75d48f1d | |||
| 65af07fffa | |||
| 1252634420 | |||
| 4a58845211 | |||
| b0701632e6 | |||
| 9277aa9f1a | |||
| 19654e1eca | |||
| d4306a0d89 | |||
| 73095a69e0 | |||
| 0f82ce941b | |||
|
|
0ec8493ab4 | ||
| 625e5daf71 | |||
| cc27843bb3 | |||
| 20fec82159 | |||
| 055b299ecb | |||
| 17bf207087 | |||
| 7ae8e12dc8 | |||
| 6e36db428a | |||
| 06103a3264 | |||
| c6da55ad65 | |||
| 20a1e3e7bb | |||
| e594d519a7 | |||
| 2ceb236a53 | |||
| 811403a0a2 | |||
| b451f2d3fc | |||
| 5c01eaad6f | |||
| 2b38e179db | |||
| 2889092821 | |||
| 3df3a7b4b5 | |||
| c63f1bd123 | |||
| 095b8559f4 | |||
| 39532f22ea | |||
| 35cebee819 | |||
| 84dfdd6b35 | |||
| e693efe8e7 | |||
| c3dcf24e50 | |||
| 9e7d99e854 | |||
| 7f2783b39a | |||
| c08135309f | |||
| cd9e3ab6c4 | |||
| adaedf195f | |||
| 87694f6654 | |||
| fe628e0f7f | |||
| 3898e2833b | |||
| e4db8505a0 | |||
| 0c25d00171 | |||
| b5a6de0b62 | |||
| 1d3d74846e | |||
| 0141427bfe | |||
| 866946646b | |||
| 251e580e8a | |||
| c734779b69 | |||
| 0923f32b46 | |||
| 1c2c4c1356 | |||
| d3eb483461 | |||
| 5b32698ec5 | |||
| 0ed734b2b1 | |||
| 396352ba15 | |||
| 7b9bdcf768 | |||
| 47058f0ddd | |||
|
|
24eef9eee0 | ||
|
|
a6c9e5102f | ||
| a72627d87c | |||
| 4a9719cdfb | |||
| 7de5ec5248 | |||
| e5f9105364 | |||
| ce2a23f9e6 | |||
| d56b022bf5 | |||
| ca38c10ec4 | |||
| 13dbd00bb7 | |||
| e7289dc9be | |||
| 5394178abc | |||
| 981551e944 | |||
| 27446adf3f | |||
| 2f071c25b2 | |||
| d23fd32e84 | |||
| 86d35a9ede | |||
| c77d51a95c | |||
| 3cbea14e84 | |||
| 6e59af1662 | |||
| 8684d178e0 | |||
| 945ab6c9cf | |||
| 520901c331 | |||
| 9a274250cd | |||
| 6d49d83ea7 | |||
| fb04b3bc46 | |||
| a60b75a4c0 | |||
| 041fda8522 | |||
| 6508f03d08 | |||
| 93cf53025c | |||
| 83912b6c28 | |||
| 6ed5a7f9e0 | |||
| 2f78e9367c | |||
| ac074ce0ff | |||
| 8bdf9197d6 | |||
| 4db8fa2360 | |||
| 2a48927a08 | |||
| 58027bb988 | |||
| 30a87fdb4c | |||
| 5cdfb2a543 | |||
| 08e96aa32a | |||
| 3b8390a0c8 | |||
| b6ad0f82a0 | |||
| 7009828e79 | |||
| 45e541ac00 | |||
| a5a7354061 | |||
| 20bb90e77a | |||
| 148ca53538 | |||
| 2951fd2da9 | |||
| f411637520 | |||
| 75ee6e10aa | |||
| cfa7724b68 | |||
| f44e89b0de | |||
| 23ed707029 | |||
| 299fd59e19 | |||
| b601962354 | |||
| 38f784b850 | |||
| 6d2bf5cdd2 | |||
| 166c8470d4 | |||
| 64510a5307 | |||
| b454f4de8d | |||
| c161639766 | |||
| dd336a7d9a | |||
| ab37ecb7f7 | |||
| bd02f079ab | |||
| b97febc16e | |||
| 149cfc0a17 | |||
| 00276dc817 | |||
| ec2ef34f23 | |||
| aebc3d2e9b | |||
| 22ff90ca56 | |||
| f3c4b9e9b1 | |||
| 98d2443679 | |||
| c61bc841e6 | |||
| 74f166e6cf | |||
|
|
81359c036c | ||
| 92c89996f5 | |||
| 1c73d3f87a | |||
| e35972b8ad | |||
| 0344d58ad4 | |||
| cf7bd1ceec | |||
| c3540bfbe1 | |||
| bd54628b5c | |||
| b103736a9d | |||
| 7eeedf0b31 | |||
| 1675001f24 | |||
| e01ecdf1db | |||
| febee7cac5 | |||
| bf5e136dc9 | |||
| 86cdc9adf2 | |||
| f09afdb850 | |||
| efbca43f27 | |||
| a0451aa8a0 | |||
| 7608bcfed3 | |||
| c62710a7d0 | |||
| 0151c3f68a | |||
| 1f36f2d7bc |
12
.gitignore
vendored
12
.gitignore
vendored
@@ -3,7 +3,17 @@ bin/
|
||||
cert/
|
||||
tmp/
|
||||
.meta/
|
||||
db/
|
||||
|
||||
com/test.lua
|
||||
com/_config.lua
|
||||
|
||||
.vscode
|
||||
Taskfile.yml
|
||||
config.yaml
|
||||
|
||||
config.yaml
|
||||
wiki
|
||||
|
||||
# Garbage
|
||||
com/_*
|
||||
com/test.lua
|
||||
40
Makefile
40
Makefile
@@ -4,7 +4,10 @@ GOPATH := $(shell go env GOPATH)
|
||||
export CONFIG_PATH := ./config.yaml
|
||||
export NODE_PATH := $(shell pwd)
|
||||
|
||||
LDFLAGS := -X 'github.com/akyaiy/GoSally-mvp/core/config.NodeVersion=v0.0.1-dev'
|
||||
NODE_VERSION := v0.0.1-dev
|
||||
SV1_VERSION := v0.0.1-dev
|
||||
|
||||
LDFLAGS := -X 'github.com/akyaiy/GoSally-mvp/src/internal/engine/config.NodeVersion=$(NODE_VERSION)' -X 'github.com/akyaiy/GoSally-mvp/src/internal/server/sv1.SV1Version=$(SV1_VERSION)'
|
||||
CGO_CFLAGS := -I/usr/local/include
|
||||
CGO_LDFLAGS := -L/usr/local/lib -llua5.1 -lm -ldl
|
||||
.PHONY: all build run runq test fmt vet lint check clean
|
||||
@@ -29,8 +32,15 @@ build:
|
||||
@echo "Building..."
|
||||
@# @echo "CGO_CFLAGS is: '$(CGO_CFLAGS)'"
|
||||
@# @echo "CGO_LDFLAGS is: '$(CGO_LDFLAGS)'"
|
||||
@# CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)"
|
||||
go build -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/$(APP_NAME) ./
|
||||
@# CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)"
|
||||
cd src && go build -trimpath -ldflags "-w -s $(LDFLAGS)" -o ../$(BIN_DIR)/$(APP_NAME) ./
|
||||
# @if ! command -v upx >/dev/null 2>&1; then \
|
||||
# echo "upx not found, skipping compression."; \
|
||||
# elif upx -t $(BIN_DIR)/$(APP_NAME) >/dev/null 2>&1; then \
|
||||
# echo "$(BIN_DIR)/$(APP_NAME) already compressed, skipping."; \
|
||||
# else \
|
||||
# upx $(BIN_DIR)/$(APP_NAME) >/dev/null 2>&1 || true; \
|
||||
# fi
|
||||
|
||||
run: build
|
||||
@echo "Running!"
|
||||
@@ -45,28 +55,22 @@ pure-run:
|
||||
exec ./$(BIN_DIR)/$(APP_NAME)
|
||||
|
||||
test:
|
||||
@go test ./... | grep -v '^?' || true
|
||||
@cd src && go test ./... | grep -v '^?' || true
|
||||
|
||||
fmt:
|
||||
@go fmt ./internal/./...
|
||||
@go fmt ./cmd/./...
|
||||
@$(GOPATH)/bin/goimports -w ./internal/
|
||||
@$(GOPATH)/bin/goimports -w ./cmd/
|
||||
@cd src && go fmt .
|
||||
@cd src && $(GOPATH)/bin/goimports -w .
|
||||
|
||||
vet:
|
||||
@go vet ./...
|
||||
|
||||
lint:
|
||||
@$(GOPATH)/bin/golangci-lint run
|
||||
|
||||
@cd src && go vet ./...
|
||||
check: fmt vet lint test
|
||||
|
||||
licenses:
|
||||
lint:
|
||||
@cd src && $(GOPATH)/bin/golangci-lint run ./...
|
||||
@$(GOPATH)/bin/go-licenses save ./... --save_path=third_party/licenses --force
|
||||
@echo "Licenses have been exported to third_party/licenses"
|
||||
|
||||
clean:
|
||||
@rm -rf bin
|
||||
|
||||
licenses:
|
||||
@cd src && $(GOPATH)/bin/go-licenses save ./... --save_path=../third_party/licenses --force
|
||||
@echo "Licenses have been exported to third_party/licenses"
|
||||
help:
|
||||
@echo "Available commands: $$(cat Makefile | grep -E '^[a-zA-Z_-]+:.*?' | grep -v -- '-setup:' | sed 's/:.*//g' | sort | uniq | tr '\n' ' ')"
|
||||
|
||||
147
README.md
147
README.md
@@ -1,51 +1,132 @@
|
||||
# Go Sally MVP (Minimum/Minimal Viable Product)
|
||||
[]()
|
||||
[](https://go.dev/)
|
||||
[](https://www.lua.org/manual/5.1/)
|
||||
[](https://pkg.go.dev/github.com/akyaiy/GoSally-mvp)
|
||||
[](LICENSE)
|
||||
|
||||
### What is this?
|
||||
System that allows you to build your own infrastructure based on identical nodes and various scripts written using built-in Lua 5.1, shebang scripts (scripts that start with the `#!` symbols), compiled binaries.
|
||||
[]()
|
||||
[]()
|
||||
[](https://github.com/akyaiy/GoSally-mvp/wiki)
|
||||
|
||||
### Features
|
||||
Go Sally is not viable at the moment, but it already has the ability to run embedded scripts, log slog events to stdout, handle RPC like requests, and independent automatic update from the repository (my pride, to be honest).
|
||||
|
||||
### Example of use
|
||||
The basic directory tree looks something like this
|
||||
```
|
||||
.
|
||||
├── bin
|
||||
│ └── node Node core binary file
|
||||
├── com
|
||||
│ ├── echo.lua
|
||||
│ ├── _globals.lua Declaring global variables and functions for all internal scripts (also required for luarc to work correctly)
|
||||
│ └── _prepare.lua Script that is executed before each script launch
|
||||
└── config.yaml
|
||||
> ⚡ **What, Why, Why Care?**
|
||||
|
||||
3 directories, 5 files
|
||||
> **What:** Go Sally is a lightweight decentralized node system with Lua scripting and JSON-RPC2.0.
|
||||
|
||||
```
|
||||
Launch by command
|
||||
> **Why:** Large admin tools are too heavy, and Raspberry Pi and small servers require a lightweight, modular architecture.
|
||||
|
||||
> **Why Care:** Create, automate, and expand your infrastructure quickly, without unnecessary software or dependencies.
|
||||
|
||||
## Navigation
|
||||
* [Core features](#core-features)
|
||||
* [Quick start](#quick-start)
|
||||
* [Test it](#test-it)
|
||||
* [Concept](#concept)
|
||||
* [API](#api)
|
||||
* [License](#license)
|
||||
* [Wiki →](https://github.com/akyaiy/GoSally-mvp/wiki)
|
||||
|
||||
> [!NOTE]
|
||||
> If you see "💡" in the text, it means the information below is about plans for the future of the project.
|
||||
|
||||
## Core features
|
||||
- **Decentralized nodes**<details>this means that *multiple GS[^1] nodes can be located on a single machine*, provided no attempt is made to disrupt, sabotage, or bypass the built-in protection mechanism against running a node under the same identifier as one already running in the system. Identification plays a role in node communication. 💡 In the future, we plan to create tools for conveniently building distributed systems using node identification.
|
||||
**Why Care?** Multiple nodes on one machine allow testing, experimentation, and scaling small infrastructures without extra hardware or complex setup.</details>
|
||||
- **RPC request processing**<details>the GS operates *using HTTP/https and the JSONRPC2.0 protocol.* Unlike gRPC, jsonrpc is extremely simple, allows for easy sending of requests from the browser, and does not require any additional code compilation. **Why Care?** Easy-to-use API means you can control nodes from anywhere, including lightweight web clients, without compiling extra code.</details>
|
||||
- **Lua script-based methods**<details>*The gopher-lua library is used, providing full support for Lua 5.1.* scripts implement libraries for interacting with sessions (receiving parameters and sending responses), hashing, logging, and more. This allows you to quickly write business logic on the fly without touching the lower layers of abstraction, which also eliminates unnecessary compilation and the risk of breaking the codebase.
|
||||
Example of the "echo" method:
|
||||
```lua
|
||||
local session = require("internal.session")
|
||||
-- import the internal library for interacting with sessions
|
||||
|
||||
session.response.send(session.request.params.get())
|
||||
-- send everything passed in the parameters in response.
|
||||
```
|
||||
**Why Care?** You can extend node behavior dynamically, write custom logic fast, and iterate without recompiling — perfect for experiments or automation.
|
||||
</details>
|
||||
- **Relatively flexible configuration** <details>
|
||||
you can configure the server port, address, name, node settings, and more. 💡 More settings are planned in the future. **Why Care?** Configure nodes for any environment, from Raspberry Pi to VPS, without touching the source code. obviously :)</details>
|
||||
- ***And more in the future***
|
||||
|
||||
> [!IMPORTANT]
|
||||
> This is the beginning of the project's development, and some aspects of it may be unstable, unfinished, and the text about it may be overly ambitious. It's just a matter of time.
|
||||
|
||||
## Quick start
|
||||
```bash
|
||||
git clone https://github.com/akyaiy/GoSally-mvp.git && \
|
||||
cd GoSally-mvp && \
|
||||
make build && \
|
||||
echo -e "node:\n com_dir: \"%path%/com\"" > config.yaml && \
|
||||
mkdir -p com && \
|
||||
echo -e 'local session = require("internal.session")\n\nsession.response.send(session.request.params.get())' > com/echo.lua && \
|
||||
./bin/node run
|
||||
```
|
||||
or for structured logs
|
||||
```bash
|
||||
./bin/node run | jq
|
||||
```
|
||||
|
||||
Example of GET request to server
|
||||
If you have problems, make sure you have all [dependencies](https://github.com/akyaiy/GoSally-mvp/wiki/Getting-started#installing-dependencies) installed, otherwise [file an issue report](https://github.com/akyaiy/GoSally-mvp/issues)
|
||||
|
||||
### Test it
|
||||
```bash
|
||||
curl -s http://localhost:8080/api/v1/com/echo?msg=Hello
|
||||
curl -X POST http://localhost:8080/com \
|
||||
-d '{"jsonrpc":"2.0","context-version": "v1","method":"echo","params":["Hi!!"],"id":1}'
|
||||
```
|
||||
Then the response from the server
|
||||
Expected response:
|
||||
```json
|
||||
{
|
||||
"ResponsibleAgentUUID": "4593a87000bbe088f4e79c477e9c90d3",
|
||||
"RequestedCommand": "echo",
|
||||
"Response": {
|
||||
"answer": "Hello",
|
||||
"status": "ok"
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": [
|
||||
"Hi!!"
|
||||
],
|
||||
"data": {
|
||||
"responsible-node": "a0e1c440473ffd4d87e32cff2717f5b3",
|
||||
"salt": "f26df732-a3be-4400-8e71-b8dc3ba705fc",
|
||||
"checksum-md5": "cd8bec6a365d1b8ee90773567cb3ad0a"
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### How to install
|
||||
**You don't need it now, but you can figure it out with the Makefile**
|
||||
## Concept
|
||||
The project was originally conceived as a tool for building infrastructure using relatively *small nodes with limited functionality*. 💡 In the future, we plan to create a *web interface for interacting with nodes, administration, and configuration*. The concept is simple: suppose we have a node that manages Bind9. It has all the necessary methods for interacting with the service: creating new zones, viewing zone status, changing configuration, and server operation status. All of this works only through manual configuration, with the exception of larger solutions like Webmin and the BIND DNS Server module. The big problem is that while we only needed web configuration for Bind9, we have to pull in a massive amount of software just to implement one module. What if the service is hosted on a low-power Raspberry Pi? That's where GS nodes come in. By default, GS nodes communicate only through API calls, so 💡 in the future, we plan to create a dedicated, also programmable, web node that will provide convenient access to node management.
|
||||
|
||||
There's an obvious advantage here: transparency. The project is *completely open source and aims to support community-driven node functionality*. 💡 In the future, we plan to create a "store" similar to Docker Hub, which will contain scripts for configuring bind9, openvpn, and even custom projects.
|
||||
|
||||
## API
|
||||
As mentioned earlier, *the server handles [jsonrpc2.0](https://www.jsonrpc.org/specification) requests*
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"context-version": "v1",
|
||||
"method": "test",
|
||||
"params": [
|
||||
"Hi!!"
|
||||
],
|
||||
"id": 1
|
||||
}
|
||||
```
|
||||
This is a typical example of a request using the jsonrpc2.0 protocol.
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": [
|
||||
"Hi!!"
|
||||
],
|
||||
"data": {
|
||||
"responsible-node": "2ad6ebeaf579a7c52801fb6c9dd1b83d",
|
||||
"salt": "e7a81115-01c1-45b1-9618-0eae0ff26451",
|
||||
"checksum-md5": "cd8bec6a365d1b8ee90773567cb3ad0a"
|
||||
}
|
||||
}
|
||||
```
|
||||
In the result field, we see the echo method's response. Those familiar with the jsonrpc2.0 specification will notice that the data structure here is unclear. This is my extension, which has three functions:
|
||||
| Field | Type | Description |
|
||||
|--------|------|-------------|
|
||||
| `responsible-node` | string | ID of the node that executed the task |
|
||||
| `salt` | string | Random value for each request — can be used to check that the response is unique |
|
||||
| `checksum-md5` | string | MD5 hash of the result field — can be used to avoid processing identical results separately |
|
||||
|
||||
## License
|
||||
Distributed under the BSD 3-Clause License. See [`LICENSE`](./LICENSE) for more information.
|
||||
|
||||
[^1]: Go Sally
|
||||
|
||||
34
cmd/root.go
34
cmd/root.go
@@ -1,34 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/logs"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var compositor *config.Compositor = config.NewCompositor()
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "node",
|
||||
Short: "Go Sally node",
|
||||
Long: "Main node runner for Go Sally",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
_ = cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetPrefix(logs.SetBrightBlack(fmt.Sprintf("(%s) ", corestate.StageNotReady)))
|
||||
log.SetFlags(log.Ldate | log.Ltime)
|
||||
compositor.LoadCMDLine(rootCmd)
|
||||
_ = rootCmd.Execute()
|
||||
// if err := rootCmd.Execute(); err != nil {
|
||||
// log.Fatalf("Unexpected error: %s", err.Error())
|
||||
// }
|
||||
}
|
||||
339
cmd/run.go
339
cmd/run.go
@@ -1,339 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/update"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/logs"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/server/gateway"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/server/sv1"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/cors"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/netutil"
|
||||
"gopkg.in/ini.v1"
|
||||
)
|
||||
|
||||
func contains(slice []string, item string) bool {
|
||||
for _, v := range slice {
|
||||
if v == item {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var runCmd = &cobra.Command{
|
||||
Use: "run",
|
||||
Short: "Run node normally",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
nodeApp := app.New()
|
||||
|
||||
nodeApp.InitialHooks(
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
x.Config = compositor
|
||||
x.Log.SetOutput(os.Stdout)
|
||||
x.Log.SetPrefix(logs.SetBrightBlack(fmt.Sprintf("(%s) ", cs.Stage)))
|
||||
x.Log.SetFlags(log.Ldate | log.Ltime)
|
||||
},
|
||||
|
||||
// First stage: pre-init
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
*cs = *corestate.NewCorestate(&corestate.CoreState{
|
||||
UUID32DirName: "uuid",
|
||||
NodeBinName: filepath.Base(os.Args[0]),
|
||||
NodeVersion: config.NodeVersion,
|
||||
MetaDir: "./.meta",
|
||||
Stage: corestate.StagePreInit,
|
||||
StartTimestampUnix: time.Now().Unix(),
|
||||
})
|
||||
},
|
||||
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
x.Log.SetPrefix(logs.SetBlue(fmt.Sprintf("(%s) ", cs.Stage)))
|
||||
|
||||
if err := x.Config.LoadEnv(); err != nil {
|
||||
x.Log.Fatalf("env load error: %s", err)
|
||||
}
|
||||
cs.NodePath = x.Config.Env.NodePath
|
||||
|
||||
if cfgPath := x.Config.CMDLine.Run.ConfigPath; cfgPath != "" {
|
||||
x.Config.Env.ConfigPath = cfgPath
|
||||
}
|
||||
if err := x.Config.LoadConf(x.Config.Env.ConfigPath); err != nil {
|
||||
x.Log.Fatalf("conf load error: %s", err)
|
||||
}
|
||||
},
|
||||
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
uuid32, err := corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
if err := corestate.SetNodeUUID(filepath.Join(cs.NodePath, cs.MetaDir, cs.UUID32DirName)); err != nil {
|
||||
x.Log.Fatalf("Cannod generate node uuid: %s", err.Error())
|
||||
}
|
||||
uuid32, err = corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
|
||||
if err != nil {
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
x.Log.Fatalf("uuid load error: %s", err)
|
||||
}
|
||||
cs.UUID32 = uuid32
|
||||
},
|
||||
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
if x.Config.Env.ParentStagePID != os.Getpid() {
|
||||
if !contains(x.Config.Conf.DisableWarnings, "--WNonStdTmpDir") && os.TempDir() != "/tmp" {
|
||||
x.Log.Printf("%s: %s", logs.PrintWarn(), "Non-standard value specified for temporary directory")
|
||||
}
|
||||
// still pre-init stage
|
||||
runDir, err := run_manager.Create(cs.UUID32)
|
||||
if err != nil {
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
cs.RunDir = runDir
|
||||
input, err := os.Open(os.Args[0])
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
if err := run_manager.Set(cs.NodeBinName); err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
fmgr := run_manager.File(cs.NodeBinName)
|
||||
output, err := fmgr.Open()
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.Copy(output, input); err != nil {
|
||||
fmgr.Close()
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
if err := os.Chmod(filepath.Join(cs.RunDir, cs.NodeBinName), 0755); err != nil {
|
||||
fmgr.Close()
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
input.Close()
|
||||
fmgr.Close()
|
||||
runArgs := os.Args
|
||||
runArgs[0] = filepath.Join(cs.RunDir, cs.NodeBinName)
|
||||
|
||||
// prepare environ
|
||||
env := utils.SetEviron(os.Environ(), fmt.Sprintf("GS_PARENT_PID=%d", os.Getpid()))
|
||||
|
||||
if err := syscall.Exec(runArgs[0], runArgs, env); err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
}
|
||||
x.Log.Printf("Node uuid is %s", cs.UUID32)
|
||||
},
|
||||
|
||||
// post-init stage
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
cs.Stage = corestate.StagePostInit
|
||||
x.Log.SetPrefix(logs.SetYellow(fmt.Sprintf("(%s) ", cs.Stage)))
|
||||
|
||||
cs.RunDir = run_manager.Toggle()
|
||||
exist, err := utils.ExistsMatchingDirs(filepath.Join(os.TempDir(), fmt.Sprintf("/*-%s-%s", cs.UUID32, "gosally-runtime")), cs.RunDir)
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
if exist {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unable to continue node operation: A node with the same identifier was found in the runtime environment")
|
||||
}
|
||||
|
||||
if err := run_manager.Set("run.lock"); err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
lockPath, err := run_manager.Get("run.lock")
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
lockFile := ini.Empty()
|
||||
secRun, err := lockFile.NewSection("runtime")
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
secRun.Key("pid").SetValue(fmt.Sprintf("%d/%d", os.Getpid(), x.Config.Env.ParentStagePID))
|
||||
secRun.Key("version").SetValue(cs.NodeVersion)
|
||||
secRun.Key("uuid").SetValue(cs.UUID32)
|
||||
secRun.Key("timestamp").SetValue(time.Unix(cs.StartTimestampUnix, 0).Format("2006-01-02/15:04:05 MST"))
|
||||
secRun.Key("timestamp-unix").SetValue(fmt.Sprintf("%d", cs.StartTimestampUnix))
|
||||
|
||||
err = lockFile.SaveTo(lockPath)
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
},
|
||||
|
||||
func(cs *corestate.CoreState, x *app.AppX) {
|
||||
cs.Stage = corestate.StageReady
|
||||
x.Log.SetPrefix(logs.SetGreen(fmt.Sprintf("(%s) ", cs.Stage)))
|
||||
|
||||
x.SLog = new(slog.Logger)
|
||||
newSlog, err := logs.SetupLogger(x.Config.Conf.Log)
|
||||
if err != nil {
|
||||
_ = run_manager.Clean()
|
||||
x.Log.Fatalf("Unexpected failure: %s", err.Error())
|
||||
}
|
||||
*x.SLog = *newSlog
|
||||
},
|
||||
)
|
||||
|
||||
nodeApp.Run(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) error {
|
||||
ctxMain, cancelMain := context.WithCancel(ctx)
|
||||
runLockFile := run_manager.File("run.lock")
|
||||
_, err := runLockFile.Open()
|
||||
if err != nil {
|
||||
x.Log.Fatalf("cannot open run.lock: %s", err)
|
||||
}
|
||||
|
||||
_, err = runLockFile.Watch(ctxMain, func() {
|
||||
x.Log.Printf("run.lock was touched")
|
||||
_ = run_manager.Clean()
|
||||
cancelMain()
|
||||
})
|
||||
if err != nil {
|
||||
x.Log.Printf("watch error: %s", err)
|
||||
}
|
||||
|
||||
serverv1 := sv1.InitV1Server(&sv1.HandlerV1InitStruct{
|
||||
Log: *x.SLog,
|
||||
Config: x.Config.Conf,
|
||||
AllowedCmd: regexp.MustCompile(`^[a-zA-Z0-9]+$`),
|
||||
ListAllowedCmd: regexp.MustCompile(`^[a-zA-Z0-9_-]+$`),
|
||||
Ver: "v1",
|
||||
})
|
||||
|
||||
s := gateway.InitGateway(&gateway.GatewayServerInit{
|
||||
Log: x.SLog,
|
||||
Config: x.Config.Conf,
|
||||
}, serverv1)
|
||||
|
||||
r := chi.NewRouter()
|
||||
r.Use(cors.Handler(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"GET", "POST", "OPTIONS"},
|
||||
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 300,
|
||||
}))
|
||||
r.HandleFunc(config.ComDirRoute, s.Handle)
|
||||
r.Route("/favicon.ico", func(r chi.Router) {
|
||||
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
})
|
||||
})
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: x.Config.Conf.HTTPServer.Address,
|
||||
Handler: r,
|
||||
ErrorLog: log.New(&logs.SlogWriter{
|
||||
Logger: x.SLog,
|
||||
Level: logs.GlobalLevel,
|
||||
}, "", 0),
|
||||
}
|
||||
go func() {
|
||||
if x.Config.Conf.TLS.TlsEnabled {
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port))
|
||||
if err != nil {
|
||||
x.Log.Printf("%s: Failed to start TLS listener: %s", logs.PrintError(), err.Error())
|
||||
cancelMain()
|
||||
return
|
||||
}
|
||||
x.Log.Printf("Serving on %s port %s with TLS... (https://%s%s)", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
|
||||
limitedListener := netutil.LimitListener(listener, 100)
|
||||
if err := srv.ServeTLS(limitedListener, x.Config.Conf.TLS.CertFile, x.Config.Conf.TLS.KeyFile); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
x.Log.Printf("%s: Failed to start HTTPS server: %s", logs.PrintError(), err.Error())
|
||||
cancelMain()
|
||||
}
|
||||
} else {
|
||||
x.Log.Printf("Serving on %s port %s... (http://%s%s)", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port))
|
||||
if err != nil {
|
||||
x.Log.Printf("%s: Failed to start listener: %s", logs.PrintError(), err.Error())
|
||||
cancelMain()
|
||||
return
|
||||
}
|
||||
limitedListener := netutil.LimitListener(listener, 100)
|
||||
if err := srv.Serve(limitedListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
x.Log.Printf("%s: Failed to start HTTP server: %s", logs.PrintError(), err.Error())
|
||||
cancelMain()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if x.Config.Conf.Updates.UpdatesEnabled {
|
||||
go func() {
|
||||
x.Updated = update.NewUpdater(ctxMain, x.Log, x.Config.Conf, x.Config.Env)
|
||||
x.Updated.Shutdownfunc(cancelMain)
|
||||
for {
|
||||
isNewUpdate, err := x.Updated.CkeckUpdates()
|
||||
if err != nil {
|
||||
x.Log.Printf("Failed to check for updates: %s", err.Error())
|
||||
}
|
||||
if isNewUpdate {
|
||||
if err := x.Updated.Update(); err != nil {
|
||||
x.Log.Printf("Failed to update: %s", err.Error())
|
||||
} else {
|
||||
x.Log.Printf("Update completed successfully")
|
||||
}
|
||||
}
|
||||
time.Sleep(x.Config.Conf.Updates.CheckInterval)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
<-ctxMain.Done()
|
||||
if err := srv.Shutdown(ctxMain); err != nil {
|
||||
x.Log.Printf("%s: Failed to stop the server gracefully: %s", logs.PrintError(), err.Error())
|
||||
} else {
|
||||
x.Log.Printf("Server stopped gracefully")
|
||||
}
|
||||
|
||||
x.Log.Println("Cleaning up...")
|
||||
|
||||
if err := run_manager.Clean(); err != nil {
|
||||
x.Log.Printf("%s: Cleanup error: %s", logs.PrintError(), err.Error())
|
||||
}
|
||||
x.Log.Println("bye!")
|
||||
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(runCmd)
|
||||
}
|
||||
22
com/Access/_common.lua
Normal file
22
com/Access/_common.lua
Normal file
@@ -0,0 +1,22 @@
|
||||
-- File com/Access/_common.lua
|
||||
--
|
||||
-- Created at 2025-21-10
|
||||
--
|
||||
-- Description:
|
||||
--- Common functions for Unit module
|
||||
|
||||
local common = {}
|
||||
|
||||
function common.CheckMissingElement(arr, cmp)
|
||||
local is_missing = {}
|
||||
local ok = true
|
||||
for _, key in ipairs(arr) do
|
||||
if cmp[key] == nil then
|
||||
table.insert(is_missing, key)
|
||||
ok = false
|
||||
end
|
||||
end
|
||||
return ok, is_missing
|
||||
end
|
||||
|
||||
return common
|
||||
30
com/Access/_errors.lua
Normal file
30
com/Access/_errors.lua
Normal file
@@ -0,0 +1,30 @@
|
||||
-- File com/Access/_errors.lua
|
||||
--
|
||||
-- Created at 2025-21-10
|
||||
-- Description:
|
||||
--- Centralized error definitions for Access operations
|
||||
--- to keep API responses consistent and clean.
|
||||
|
||||
local errors = {
|
||||
-- Common validation
|
||||
MISSING_PARAMS = { code = -32602, message = "Missing params" },
|
||||
INVALID_FIELD_TYPE = { code = -32602, message = "'fields' must be a non-empty table" },
|
||||
INVALID_BY_PARAM = { code = -32602, message = "Invalid 'by' param" },
|
||||
NO_VALID_FIELDS = { code = -32604, message = "No valid fields to update" },
|
||||
|
||||
-- Existence / duplication
|
||||
UNIT_NOT_FOUND = { code = -32102, message = "Unit is not exists" },
|
||||
UNIT_EXISTS = { code = -32101, message = "Unit is already exists" },
|
||||
|
||||
-- Database & constraint
|
||||
UNIQUE_CONSTRAINT = { code = -32602, message = "Unique constraint failed" },
|
||||
DB_QUERY_FAILED = { code = -32001, message = "Database query failed" },
|
||||
DB_EXEC_FAILED = { code = -32002, message = "Database execution failed" },
|
||||
DB_INSERT_FAILED = { code = -32003, message = "Failed to create unit" },
|
||||
DB_DELETE_FAILED = { code = -32004, message = "Failed to delete unit" },
|
||||
|
||||
-- Generic fallback
|
||||
UNKNOWN = { code = -32099, message = "Unexpected internal error" },
|
||||
}
|
||||
|
||||
return errors
|
||||
11
com/Echo.lua
Normal file
11
com/Echo.lua
Normal file
@@ -0,0 +1,11 @@
|
||||
local s = require("internal.session")
|
||||
|
||||
if not s.request.params.__fetched.data then
|
||||
s.response.error = {
|
||||
code = 123,
|
||||
message = "params.data is missing"
|
||||
}
|
||||
return
|
||||
end
|
||||
|
||||
s.response.send(s.request.params.__fetched)
|
||||
57
com/List.lua
Normal file
57
com/List.lua
Normal file
@@ -0,0 +1,57 @@
|
||||
-- com/List.lua
|
||||
|
||||
local session = require("internal.session")
|
||||
|
||||
local params = session.request.params.get()
|
||||
|
||||
if params.about then
|
||||
session.response.result = {
|
||||
description = "Returns a list of available methods",
|
||||
params = {
|
||||
layer = "select which layer list to display"
|
||||
}
|
||||
}
|
||||
return
|
||||
end
|
||||
|
||||
local function isValidName(name)
|
||||
return name:match("^[%w]+$") ~= nil
|
||||
end
|
||||
|
||||
local function scanDirectory(basePath, targetPath)
|
||||
local res = {}
|
||||
local fullPath = basePath.."/"..targetPath
|
||||
local handle = io.popen('find "'..fullPath..'" -type f -name "*.lua" 2>/dev/null')
|
||||
|
||||
if handle then
|
||||
for filePath in handle:lines() do
|
||||
local parts = {}
|
||||
for part in filePath:gsub(".lua$", ""):gmatch("[^/]+") do
|
||||
table.insert(parts, part)
|
||||
end
|
||||
|
||||
local allValid = true
|
||||
for _, part in ipairs(parts) do
|
||||
if not isValidName(part) then
|
||||
allValid = false
|
||||
break
|
||||
end
|
||||
end
|
||||
|
||||
if allValid then
|
||||
local relPath = filePath:gsub("^"..basePath.."/", ""):gsub(".lua$", ""):gsub("/", ">")
|
||||
table.insert(res, relPath)
|
||||
end
|
||||
end
|
||||
handle:close()
|
||||
end
|
||||
|
||||
return #res > 0 and res or nil
|
||||
end
|
||||
|
||||
local basePath = "com"
|
||||
local layer = params.layer and params.layer:gsub(">", "/") or nil
|
||||
|
||||
session.response.send({
|
||||
answer = layer and scanDirectory(basePath, layer) or scanDirectory(basePath, "")
|
||||
})
|
||||
68
com/Unit/Create.lua
Normal file
68
com/Unit/Create.lua
Normal file
@@ -0,0 +1,68 @@
|
||||
-- File com/Unit/Create.lua
|
||||
--
|
||||
-- Created at 2025-05-10 18:23
|
||||
--
|
||||
-- Updated at -
|
||||
-- Description:
|
||||
--- Creates a record in the unit.db database without
|
||||
--- requiring additional permissions. Requires username,
|
||||
--- password (hashing occurs at the server level), and email fields.
|
||||
|
||||
local log = require("internal.log")
|
||||
local db = require("internal.database.sqlite").connect("db/unit.db", {log = true})
|
||||
local session = require("internal.session")
|
||||
local crypt = require("internal.crypt.bcrypt")
|
||||
local sha256 = require("internal.crypt.sha256")
|
||||
|
||||
local common = require("com/Unit/_common")
|
||||
local errors = require("com/Unit/_errors")
|
||||
|
||||
-- Preparing for first db query
|
||||
local function close_db()
|
||||
if db then
|
||||
log.debug("Closing DB connection")
|
||||
db:close()
|
||||
db = nil
|
||||
end
|
||||
end
|
||||
|
||||
local params = session.request.params.get()
|
||||
|
||||
local ok, mp = common.CheckMissingElement({"username", "password", "email"}, params)
|
||||
if not ok then
|
||||
close_db()
|
||||
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
|
||||
end
|
||||
|
||||
local hashPass = crypt.generate(params.password, crypt.DefaultCost)
|
||||
local unitID = string.sub(sha256.hash(session.__seed), 1, 16)
|
||||
|
||||
local ctx, err = db:exec(
|
||||
"INSERT INTO units (user_id, username, email, password) VALUES (?, ?, ?, ?)",
|
||||
{
|
||||
unitID,
|
||||
params.username,
|
||||
params.email,
|
||||
hashPass,
|
||||
}
|
||||
)
|
||||
|
||||
if err ~= nil then
|
||||
log.error("Insert failed: "..tostring(err))
|
||||
close_db()
|
||||
session.response.send_error(errors.DB_INSERT_FAILED.code, errors.DB_INSERT_FAILED.message)
|
||||
end
|
||||
|
||||
local _, err = ctx:wait()
|
||||
if err ~= nil then
|
||||
close_db()
|
||||
if tostring(err):match("UNIQUE constraint failed") then
|
||||
session.response.send_error(errors.UNIT_EXISTS.code, errors.UNIT_EXISTS.message)
|
||||
else
|
||||
log.error("Insert confirmation failed: "..tostring(err))
|
||||
session.response.send_error()
|
||||
end
|
||||
end
|
||||
|
||||
close_db()
|
||||
session.response.send({unit_id = unitID})
|
||||
77
com/Unit/Delete.lua
Normal file
77
com/Unit/Delete.lua
Normal file
@@ -0,0 +1,77 @@
|
||||
-- File com/Unit/Delete.lua
|
||||
--
|
||||
-- Created at 2025-05-10 19:18
|
||||
--
|
||||
-- Updated at -
|
||||
|
||||
local log = require("internal.log")
|
||||
local db = require("internal.database.sqlite").connect("db/unit.db", {log = true})
|
||||
local session = require("internal.session")
|
||||
|
||||
local common = require("com/Unit/_common")
|
||||
local errors = require("com/Unit/_errors")
|
||||
|
||||
-- Preparing for first db query
|
||||
local function close_db()
|
||||
if db then
|
||||
log.debug("Closing DB connection")
|
||||
db:close()
|
||||
db = nil
|
||||
end
|
||||
end
|
||||
|
||||
local params = session.request.params.get()
|
||||
|
||||
local ok, mp = common.CheckMissingElement({"user_id"}, params)
|
||||
if not ok then
|
||||
close_db()
|
||||
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
|
||||
end
|
||||
|
||||
local existing, err = db:query([[
|
||||
SELECT 1
|
||||
FROM units
|
||||
WHERE user_id = ?
|
||||
AND entry_status != 'deleted'
|
||||
AND deleted_at IS NULL
|
||||
LIMIT 1
|
||||
]], {
|
||||
params.user_id
|
||||
})
|
||||
|
||||
if err ~= nil then
|
||||
log.error("Email check failed: "..tostring(err))
|
||||
close_db()
|
||||
session.response.send_error()
|
||||
end
|
||||
|
||||
if existing and #existing == 0 then
|
||||
close_db()
|
||||
session.response.send_error(errors.UNIT_NOT_FOUND.code, errors.UNIT_NOT_FOUND.message)
|
||||
end
|
||||
|
||||
local ctx, err = db:exec(
|
||||
[[
|
||||
UPDATE units
|
||||
SET entry_status = 'deleted',
|
||||
deleted_at = CURRENT_TIMESTAMP
|
||||
WHERE user_id = ? AND deleted_at is NULL
|
||||
]],
|
||||
{ params.user_id }
|
||||
)
|
||||
|
||||
if err ~= nil then
|
||||
log.error("Soft delete failed: " .. tostring(err))
|
||||
close_db()
|
||||
session.response.send_error(errors.DB_DELETE_FAILED.code, errors.DB_DELETE_FAILED.message)
|
||||
end
|
||||
|
||||
local res, err = ctx:wait()
|
||||
if err ~= nil then
|
||||
log.error("Soft delete confirmation failed: " .. tostring(err))
|
||||
close_db()
|
||||
session.response.send_error(errors.DB_DELETE_FAILED.code, errors.DB_DELETE_FAILED.message)
|
||||
end
|
||||
|
||||
close_db()
|
||||
session.response.send()
|
||||
55
com/Unit/Get.lua
Normal file
55
com/Unit/Get.lua
Normal file
@@ -0,0 +1,55 @@
|
||||
-- File com/Unit/Get.lua
|
||||
--
|
||||
-- Created at 2025-09-25 20:04
|
||||
--
|
||||
-- Updated at -
|
||||
|
||||
local log = require("internal.log")
|
||||
local db = require("internal.database.sqlite").connect("db/unit.db", {log = true})
|
||||
local session = require("internal.session")
|
||||
|
||||
local common = require("com/Unit/_common")
|
||||
local errors = require("com/Unit/_errors")
|
||||
|
||||
-- Preparing for first db query
|
||||
local function close_db()
|
||||
if db then
|
||||
log.debug("Closing DB connection")
|
||||
db:close()
|
||||
db = nil
|
||||
end
|
||||
end
|
||||
|
||||
local params = session.request.params.get()
|
||||
|
||||
local ok, mp = common.CheckMissingElement({"by", "value"}, params)
|
||||
if not ok then
|
||||
close_db()
|
||||
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
|
||||
end
|
||||
|
||||
if not (params.by == "email" or params.by == "username" or params.by == "user_id") then
|
||||
close_db()
|
||||
session.response.send_error(errors.INVALID_BY_PARAM.code, errors.INVALID_BY_PARAM.message)
|
||||
end
|
||||
|
||||
local unit, err = db:query_row(
|
||||
"SELECT user_id, username, email, created_at, updated_at, deleted_at, entry_status FROM units WHERE "..params.by.." = ? AND deleted_at IS NULL LIMIT 1",
|
||||
{
|
||||
params.value
|
||||
}
|
||||
)
|
||||
|
||||
if err then
|
||||
close_db()
|
||||
log.error("DB query error: " .. tostring(err))
|
||||
session.response.send_error()
|
||||
end
|
||||
|
||||
if not unit then
|
||||
close_db()
|
||||
session.response.send_error(errors.UNIT_NOT_FOUND.code, errors.UNIT_NOT_FOUND.message)
|
||||
end
|
||||
|
||||
close_db()
|
||||
session.response.send(unit)
|
||||
102
com/Unit/Update.lua
Normal file
102
com/Unit/Update.lua
Normal file
@@ -0,0 +1,102 @@
|
||||
-- File com/Unit/Update.lua
|
||||
--
|
||||
-- Created at 2025-10-10
|
||||
--
|
||||
|
||||
local log = require("internal.log")
|
||||
local db = require("internal.database.sqlite").connect("db/unit.db", { log = true })
|
||||
local session = require("internal.session")
|
||||
|
||||
local common = require("com/Unit/_common")
|
||||
local errors = require("com/Unit/_errors")
|
||||
|
||||
local function close_db()
|
||||
if db then
|
||||
log.debug("Closing DB connection")
|
||||
db:close()
|
||||
db = nil
|
||||
end
|
||||
end
|
||||
|
||||
local params = session.request.params.get()
|
||||
|
||||
local ok, mp = common.CheckMissingElement({"user_id", "fields"}, params)
|
||||
if not ok then
|
||||
close_db()
|
||||
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
|
||||
end
|
||||
|
||||
if type(params.fields) ~= "table" or next(params.fields) == nil then
|
||||
close_db()
|
||||
session.response.send_error(errors.INVALID_FIELD_TYPE.code, errors.INVALID_FIELD_TYPE.message)
|
||||
end
|
||||
|
||||
local allowed = {
|
||||
username = true,
|
||||
email = true,
|
||||
password = true,
|
||||
entry_status = true
|
||||
}
|
||||
|
||||
local exists = db:query_row(
|
||||
"SELECT 1 FROM units WHERE user_id = ? AND deleted_at IS NULL LIMIT 1",
|
||||
{ params.user_id }
|
||||
)
|
||||
|
||||
if not exists then
|
||||
close_db()
|
||||
session.response.send_error(errors.UNIT_NOT_FOUND.code, errors.UNIT_NOT_FOUND.message)
|
||||
end
|
||||
|
||||
local set_clauses = {}
|
||||
local values = {}
|
||||
|
||||
for k, v in pairs(params.fields) do
|
||||
if allowed[k] then
|
||||
if k == "password" then
|
||||
local crypt = require("internal.crypt.bcrypt")
|
||||
v = crypt.generate(v, crypt.DefaultCost)
|
||||
end
|
||||
table.insert(set_clauses, k .. " = ?")
|
||||
table.insert(values, v)
|
||||
else
|
||||
log.warn("Ignoring unsupported field: " .. k)
|
||||
end
|
||||
end
|
||||
|
||||
if #set_clauses == 0 then
|
||||
close_db()
|
||||
session.response.send_error(errors.NO_VALID_FIELDS.code, errors.NO_VALID_FIELDS.message)
|
||||
end
|
||||
|
||||
table.insert(set_clauses, "updated_at = CURRENT_TIMESTAMP")
|
||||
|
||||
local query = "UPDATE units SET " .. table.concat(set_clauses, ", ")
|
||||
.. " WHERE user_id = ? AND deleted_at IS NULL"
|
||||
|
||||
table.insert(values, params.user_id)
|
||||
|
||||
local ctx, err = db:exec(query, values)
|
||||
if not ctx then
|
||||
close_db()
|
||||
if tostring(err):match("UNIQUE constraint failed") then
|
||||
session.response.send_error(errors.UNIQUE_CONSTRAINT.code, errors.UNIQUE_CONSTRAINT.message)
|
||||
else
|
||||
session.response.send_error()
|
||||
end
|
||||
end
|
||||
|
||||
local _, err = ctx:wait()
|
||||
if err ~= nil then
|
||||
close_db()
|
||||
if tostring(err):match("UNIQUE constraint failed") then
|
||||
session.response.send_error(errors.UNIQUE_CONSTRAINT.code, errors.UNIQUE_CONSTRAINT.message)
|
||||
else
|
||||
log.error("Insert confirmation failed: "..tostring(err))
|
||||
session.response.send_error()
|
||||
end
|
||||
end
|
||||
|
||||
close_db()
|
||||
|
||||
session.response.send()
|
||||
23
com/Unit/_common.lua
Normal file
23
com/Unit/_common.lua
Normal file
@@ -0,0 +1,23 @@
|
||||
-- File com/Unit/_common.lua
|
||||
--
|
||||
-- Created at 2025-05-10 18:23
|
||||
--
|
||||
-- Updated at -
|
||||
-- Description:
|
||||
--- Common functions for Unit module
|
||||
|
||||
local common = {}
|
||||
|
||||
function common.CheckMissingElement(arr, cmp)
|
||||
local is_missing = {}
|
||||
local ok = true
|
||||
for _, key in ipairs(arr) do
|
||||
if cmp[key] == nil then
|
||||
table.insert(is_missing, key)
|
||||
ok = false
|
||||
end
|
||||
end
|
||||
return ok, is_missing
|
||||
end
|
||||
|
||||
return common
|
||||
30
com/Unit/_errors.lua
Normal file
30
com/Unit/_errors.lua
Normal file
@@ -0,0 +1,30 @@
|
||||
-- File com/Unit/_errors.lua
|
||||
--
|
||||
-- Created at 2025-10-10
|
||||
-- Description:
|
||||
--- Centralized error definitions for Unit operations
|
||||
--- to keep API responses consistent and clean.
|
||||
|
||||
local errors = {
|
||||
-- Common validation
|
||||
MISSING_PARAMS = { code = -32602, message = "Missing params" },
|
||||
INVALID_FIELD_TYPE = { code = -32602, message = "'fields' must be a non-empty table" },
|
||||
INVALID_BY_PARAM = { code = -32602, message = "Invalid 'by' param" },
|
||||
NO_VALID_FIELDS = { code = -32604, message = "No valid fields to update" },
|
||||
|
||||
-- Existence / duplication
|
||||
UNIT_NOT_FOUND = { code = -32102, message = "Unit is not exists" },
|
||||
UNIT_EXISTS = { code = -32101, message = "Unit is already exists" },
|
||||
|
||||
-- Database & constraint
|
||||
UNIQUE_CONSTRAINT = { code = -32602, message = "Unique constraint failed" },
|
||||
DB_QUERY_FAILED = { code = -32001, message = "Database query failed" },
|
||||
DB_EXEC_FAILED = { code = -32002, message = "Database execution failed" },
|
||||
DB_INSERT_FAILED = { code = -32003, message = "Failed to create unit" },
|
||||
DB_DELETE_FAILED = { code = -32004, message = "Failed to delete unit" },
|
||||
|
||||
-- Generic fallback
|
||||
UNKNOWN = { code = -32099, message = "Unexpected internal error" },
|
||||
}
|
||||
|
||||
return errors
|
||||
@@ -1,11 +1,54 @@
|
||||
---@alias AnyTable table<string, any>
|
||||
--@diagnostic disable: missing-fields, missing-return
|
||||
|
||||
---@type AnyTable
|
||||
In = {
|
||||
Params = {},
|
||||
}
|
||||
---@alias Any any
|
||||
---@alias AnyTable table<string, Any>
|
||||
|
||||
---@type AnyTable
|
||||
Out = {
|
||||
Result = {},
|
||||
}
|
||||
--- Global session module interface
|
||||
---@class SessionIn
|
||||
---@field params AnyTable Request parameters
|
||||
|
||||
---@class SessionOut
|
||||
---@field result Any|string? Result payload (table or primitive)
|
||||
---@field error { code: integer, message: string, data: Any }? Optional error info
|
||||
|
||||
---@class SessionModule
|
||||
---@field request SessionIn Input context (read-only)
|
||||
---@field response SessionOut Output context (write results/errors)
|
||||
|
||||
--- Global log module interface
|
||||
---@class LogModule
|
||||
---@field info fun(msg: string) Log informational message
|
||||
---@field debug fun(msg: string) Log debug message
|
||||
---@field error fun(msg: string) Log error message
|
||||
---@field warn fun(msg: string) Log warning message
|
||||
---@field event fun(msg: string) Log event (generic)
|
||||
---@field event_error fun(msg: string) Log event error
|
||||
---@field event_warn fun(msg: string) Log event warning
|
||||
|
||||
--- Global net module interface
|
||||
---@class HttpResponse
|
||||
---@field status integer HTTP status code
|
||||
---@field status_text string HTTP status text
|
||||
---@field body string Response body
|
||||
---@field content_length integer Content length
|
||||
---@field headers AnyTable Map of headers
|
||||
|
||||
---@class HttpModule
|
||||
---@field get fun(log: boolean, url: string): HttpResponse, string? Perform GET
|
||||
---@field post fun(log: boolean, url: string, content_type: string, payload: string): HttpResponse, string? Perform POST
|
||||
|
||||
---@class NetModule
|
||||
---@field http HttpModule HTTP client functions
|
||||
|
||||
--- Global variables declaration
|
||||
---@global
|
||||
---@type SessionModule
|
||||
_G.session = session or {}
|
||||
|
||||
---@global
|
||||
---@type LogModule
|
||||
_G.log = log or {}
|
||||
|
||||
---@global
|
||||
---@type NetModule
|
||||
_G.net = net or {}
|
||||
@@ -2,15 +2,3 @@
|
||||
package.path = package.path .. ";/usr/lib64/lua/5.1/?.lua;/usr/local/share/lua/5.1/?.lua" .. ";./com/?.lua;"
|
||||
package.cpath = package.cpath .. ";/usr/lib64/lua/5.1/?.so;/usr/local/lib/lua/5.1/?.so"
|
||||
|
||||
print = function() end
|
||||
io.write = function(...) end
|
||||
io.stdout = function() return nil end
|
||||
io.stderr = function() return nil end
|
||||
io.read = function(...) return nil end
|
||||
|
||||
---@type table<string, any>
|
||||
Status = {
|
||||
ok = "ok",
|
||||
error = "error",
|
||||
invalid = "invalid",
|
||||
}
|
||||
|
||||
13
com/echo.lua
13
com/echo.lua
@@ -1,13 +0,0 @@
|
||||
--- #description = "Echoes back the message."
|
||||
--- #args
|
||||
--- msg = the message
|
||||
|
||||
if not In.Params.msg or In.Params.msg == "" then
|
||||
Out.Result.status = Status.error
|
||||
Out.Result.error = "Missing parameter: msg"
|
||||
return
|
||||
end
|
||||
|
||||
Out.Result.status = Status.ok
|
||||
Out.Result.answer = In.Params.msg
|
||||
return
|
||||
@@ -1,21 +0,0 @@
|
||||
mode: "prod"
|
||||
|
||||
http_server:
|
||||
address: "0.0.0.0:8080"
|
||||
api:
|
||||
latest-version: v1
|
||||
layers:
|
||||
- b1
|
||||
- s2
|
||||
|
||||
tls:
|
||||
enabled: false
|
||||
cert_file: "./cert/fullchain.pem"
|
||||
key_file: "./cert/privkey.pem"
|
||||
|
||||
com_dir: "com/"
|
||||
|
||||
updates:
|
||||
enabled: true
|
||||
check-interval: 1h
|
||||
repository_url: "https://repo.serve.lv/raw/go-sally"
|
||||
@@ -1,78 +0,0 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
)
|
||||
|
||||
func ConvertLuaTypesToGolang(value lua.LValue) any {
|
||||
switch value.Type() {
|
||||
case lua.LTString:
|
||||
return value.String()
|
||||
case lua.LTNumber:
|
||||
return float64(value.(lua.LNumber))
|
||||
case lua.LTBool:
|
||||
return bool(value.(lua.LBool))
|
||||
case lua.LTTable:
|
||||
tbl := value.(*lua.LTable)
|
||||
|
||||
// Попробуем как массив
|
||||
var arr []any
|
||||
isArray := true
|
||||
tbl.ForEach(func(key, val lua.LValue) {
|
||||
if key.Type() != lua.LTNumber {
|
||||
isArray = false
|
||||
}
|
||||
arr = append(arr, ConvertLuaTypesToGolang(val))
|
||||
})
|
||||
|
||||
if isArray {
|
||||
return arr
|
||||
}
|
||||
|
||||
result := make(map[string]any)
|
||||
tbl.ForEach(func(key, val lua.LValue) {
|
||||
result[key.String()] = ConvertLuaTypesToGolang(val)
|
||||
})
|
||||
return result
|
||||
|
||||
case lua.LTNil:
|
||||
return nil
|
||||
default:
|
||||
return value.String()
|
||||
}
|
||||
}
|
||||
|
||||
func ConvertGolangTypesToLua(L *lua.LState, val any) lua.LValue {
|
||||
switch v := val.(type) {
|
||||
case string:
|
||||
return lua.LString(v)
|
||||
case bool:
|
||||
return lua.LBool(v)
|
||||
case int:
|
||||
return lua.LNumber(float64(v))
|
||||
case int64:
|
||||
return lua.LNumber(float64(v))
|
||||
case float32:
|
||||
return lua.LNumber(float64(v))
|
||||
case float64:
|
||||
return lua.LNumber(v)
|
||||
case []any:
|
||||
tbl := L.NewTable()
|
||||
for i, item := range v {
|
||||
tbl.RawSetInt(i+1, ConvertGolangTypesToLua(L, item))
|
||||
}
|
||||
return tbl
|
||||
case map[string]any:
|
||||
tbl := L.NewTable()
|
||||
for key, value := range v {
|
||||
tbl.RawSetString(key, ConvertGolangTypesToLua(L, value))
|
||||
}
|
||||
return tbl
|
||||
case nil:
|
||||
return lua.LNil
|
||||
default:
|
||||
return lua.LString(fmt.Sprintf("%v", v))
|
||||
}
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/update"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
)
|
||||
|
||||
type AppContract interface {
|
||||
InitialHooks(fn ...func(cs *corestate.CoreState, x *AppX))
|
||||
Run(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX) error)
|
||||
}
|
||||
|
||||
type App struct {
|
||||
initHooks []func(cs *corestate.CoreState, x *AppX)
|
||||
runHook func(ctx context.Context, cs *corestate.CoreState, x *AppX) error
|
||||
|
||||
Corestate *corestate.CoreState
|
||||
AppX *AppX
|
||||
}
|
||||
|
||||
type AppX struct {
|
||||
Config *config.Compositor
|
||||
Log *log.Logger
|
||||
SLog *slog.Logger
|
||||
Updated *update.Updater
|
||||
}
|
||||
|
||||
func New() AppContract {
|
||||
return &App{
|
||||
AppX: &AppX{
|
||||
Log: log.Default(),
|
||||
},
|
||||
Corestate: &corestate.CoreState{},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) InitialHooks(fn ...func(cs *corestate.CoreState, x *AppX)) {
|
||||
a.initHooks = append(a.initHooks, fn...)
|
||||
}
|
||||
|
||||
func (a *App) Run(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX) error) {
|
||||
a.runHook = fn
|
||||
|
||||
for _, hook := range a.initHooks {
|
||||
hook(a.Corestate, a.AppX)
|
||||
}
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
defer stop()
|
||||
|
||||
if a.runHook != nil {
|
||||
if err := a.runHook(ctx, a.Corestate, a.AppX); err != nil {
|
||||
log.Fatalf("fatal in Run: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,80 +0,0 @@
|
||||
// Package config provides configuration management for the application.
|
||||
// config is built on top of the third-party module cleanenv
|
||||
package config
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type CompositorContract interface {
|
||||
LoadEnv() error
|
||||
LoadConf(path string) error
|
||||
}
|
||||
|
||||
type Compositor struct {
|
||||
CMDLine *CMDLine
|
||||
Conf *Conf
|
||||
Env *Env
|
||||
}
|
||||
|
||||
type Conf struct {
|
||||
Mode string `mapstructure:"mode"`
|
||||
ComDir string `mapstructure:"com_dir"`
|
||||
HTTPServer HTTPServer `mapstructure:"http_server"`
|
||||
TLS TLS `mapstructure:"tls"`
|
||||
Updates Updates `mapstructure:"updates"`
|
||||
Log Log `mapstructure:"log"`
|
||||
DisableWarnings []string `mapstructure:"disable_warnings"`
|
||||
}
|
||||
|
||||
type HTTPServer struct {
|
||||
Address string `mapstructure:"address"`
|
||||
Port string `mapstructure:"port"`
|
||||
Timeout time.Duration `mapstructure:"timeout"`
|
||||
IdleTimeout time.Duration `mapstructure:"idle_timeout"`
|
||||
HTTPServer_Api HTTPServer_Api `mapstructure:"api"`
|
||||
}
|
||||
|
||||
type HTTPServer_Api struct {
|
||||
LatestVer string `mapstructure:"latest-version"`
|
||||
Layers []string `mapstructure:"layers"`
|
||||
}
|
||||
|
||||
type TLS struct {
|
||||
TlsEnabled bool `mapstructure:"enabled"`
|
||||
CertFile string `mapstructure:"cert_file"`
|
||||
KeyFile string `mapstructure:"key_file"`
|
||||
}
|
||||
|
||||
type Updates struct {
|
||||
UpdatesEnabled bool `mapstructure:"enabled"`
|
||||
CheckInterval time.Duration `mapstructure:"check_interval"`
|
||||
RepositoryURL string `mapstructure:"repository_url"`
|
||||
WantedVersion string `mapstructure:"wanted_version"`
|
||||
}
|
||||
|
||||
type Log struct {
|
||||
Level string `mapstructure:"level"`
|
||||
OutPath string `mapstructure:"out_path"`
|
||||
}
|
||||
|
||||
// ConfigEnv structure for environment variables
|
||||
type Env struct {
|
||||
ConfigPath string `mapstructure:"config_path"`
|
||||
NodePath string `mapstructure:"node_path"`
|
||||
ParentStagePID int `mapstructure:"parent_pid"`
|
||||
}
|
||||
|
||||
type CMDLine struct {
|
||||
Run Run
|
||||
Node Root
|
||||
}
|
||||
|
||||
type Root struct {
|
||||
Debug bool `persistent:"true" full:"debug" short:"d" def:"false" desc:"Set debug mode"`
|
||||
}
|
||||
|
||||
type Run struct {
|
||||
ConfigPath string `persistent:"true" full:"config" short:"c" def:"./config.yaml" desc:"Path to configuration file"`
|
||||
Test []int `persistent:"true" full:"test" short:"t" def:"" desc:"js test"`
|
||||
}
|
||||
@@ -1,80 +0,0 @@
package gateway

import (
    "encoding/json"
    "io"
    "log/slog"
    "net/http"
    "net/http/httptest"

    "github.com/akyaiy/GoSally-mvp/internal/server/rpc"
)

func (gs *GatewayServer) Handle(w http.ResponseWriter, r *http.Request) {
    var req rpc.RPCRequest
    body, err := io.ReadAll(r.Body)
    if err != nil {
        rpc.WriteRouterError(w, http.StatusBadRequest, &rpc.RPCError{
            JSONRPC: rpc.JSONRPCVersion,
            ID:      nil,
            Error: map[string]any{
                "code":    rpc.ErrInternalError,
                "message": rpc.ErrInternalErrorS,
            },
        })
        gs.log.Info("invalid request received", slog.String("issue", rpc.ErrInternalErrorS))
        return
    }

    if err := json.Unmarshal(body, &req); err != nil {
        rpc.WriteRouterError(w, http.StatusBadRequest, &rpc.RPCError{
            JSONRPC: rpc.JSONRPCVersion,
            ID:      nil,
            Error: map[string]any{
                "code":    rpc.ErrParseError,
                "message": rpc.ErrParseErrorS,
            },
        })
        gs.log.Info("invalid request received", slog.String("issue", rpc.ErrParseErrorS))
        return
    }

    if req.JSONRPC != rpc.JSONRPCVersion {
        rpc.WriteRouterError(w, http.StatusBadRequest, &rpc.RPCError{
            JSONRPC: rpc.JSONRPCVersion,
            ID:      req.ID,
            Error: map[string]any{
                "code":    rpc.ErrInvalidRequest,
                "message": rpc.ErrInvalidRequestS,
            },
        })
        gs.log.Info("invalid request received", slog.String("issue", rpc.ErrInvalidRequestS), slog.String("requested-version", req.JSONRPC))
        return
    }

    gs.Route(w, r, req)
}

func (gs *GatewayServer) Route(w http.ResponseWriter, r *http.Request, req rpc.RPCRequest) {
    server, ok := gs.servers[serversApiVer(req.Params.ContextVersion)]
    if !ok {
        rpc.WriteRouterError(w, http.StatusBadRequest, &rpc.RPCError{
            JSONRPC: rpc.JSONRPCVersion,
            ID:      req.ID,
            Error: map[string]any{
                "code":    rpc.ErrContextVersion,
                "message": rpc.ErrContextVersionS,
            },
        })
        gs.log.Info("invalid request received", slog.String("issue", rpc.ErrContextVersionS), slog.String("requested-version", req.Params.ContextVersion))
        return
    }

    // checks if request is notification
    if req.ID == nil {
        rr := httptest.NewRecorder()
        server.Handle(rr, r, req)
        return
    }
    server.Handle(w, r, req)
}
@@ -1,29 +0,0 @@
package rpc

type RPCRequest struct {
    JSONRPC string           `json:"jsonrpc"`
    ID      any              `json:"id"`
    Method  string           `json:"method"`
    Params  RPCRequestParams `json:"params"`
}

type RPCRequestParams struct {
    ContextVersion string         `json:"context-version"`
    Method         map[string]any `json:"method-params"`
}

type RPCResponse struct {
    JSONRPC string `json:"jsonrpc"`
    ID      any    `json:"id"`
    Result  any    `json:"result"`
}

type RPCError struct {
    JSONRPC string `json:"jsonrpc"`
    ID      any    `json:"id"`
    Error   any    `json:"error"`
}

const (
    JSONRPCVersion = "2.0"
)
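For orientation, not part of the diff: a minimal sketch of the request body the gateway above accepts, built from the RPCRequest types in this file. The field names, JSON tags and the "2.0" version constant come from the code; the method name, its parameters, and the address/route are hypothetical.

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    // Mirrors rpc.RPCRequest / rpc.RPCRequestParams from the diff above.
    type rpcRequest struct {
        JSONRPC string    `json:"jsonrpc"`
        ID      any       `json:"id"`
        Method  string    `json:"method"`
        Params  rpcParams `json:"params"`
    }

    type rpcParams struct {
        ContextVersion string         `json:"context-version"`
        Method         map[string]any `json:"method-params"`
    }

    func main() {
        req := rpcRequest{
            JSONRPC: "2.0",        // must equal rpc.JSONRPCVersion or the gateway rejects the request
            ID:      1,            // leave nil to send a notification: it is handled, but no body comes back
            Method:  "status.get", // hypothetical method name
            Params: rpcParams{
                ContextVersion: "v1",                             // selects the versioned backend in GatewayServer.Route
                Method:         map[string]any{"verbose": true},  // hypothetical parameters
            },
        }
        body, _ := json.Marshal(req)
        // Address and route are illustrative; the real route comes from the node's configuration.
        resp, err := http.Post("http://localhost:8080/", "application/json", bytes.NewReader(body))
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }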
@@ -1,342 +0,0 @@
package sv1

import (
    "net/http"

    "github.com/akyaiy/GoSally-mvp/internal/server/rpc"
)

func (h *HandlerV1) Handle(w http.ResponseWriter, r *http.Request, req rpc.RPCRequest) {
    w.Write([]byte("Sigmas"))
}

// func (h *HandlerV1) Handle(w http.ResponseWriter, r *http.Request) {
//     var req PettiRequest
//     // server, ok := s.servers[serversApiVer(payload.PettiVer)]
//     // if !ok {
//     //     WriteRouterError(w, &RouterError{
//     //         Status:     "error",
//     //         StatusCode: http.StatusBadRequest,
//     //         Payload: map[string]any{
//     //             "Message": InvalidProtovolVersion,
//     //         },
//     //     })
//     //     s.log.Info("invalid request received", slog.String("issue", InvalidProtovolVersion), slog.String("requested-version", payload.PettiVer))
//     //     return
//     // }
//     if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
//         utils.WriteJSONError(w, http.StatusBadRequest, "invalid JSON: "+err.Error())
//         return
//     }

//     if req.PettiVer == "" {
//         utils.WriteJSONError(w, http.StatusBadRequest, "missing PettiVer")
//         return
//     }
//     if req.PettiVer != h.GetVersion() {
//         utils.WriteJSONError(w, http.StatusBadRequest, "unsupported PettiVer version")
//         return
//     }
//     if req.PackageType.Request == "" {
//         utils.WriteJSONError(w, http.StatusBadRequest, "missing PackageType.Request")
//         return
//     }
//     if req.Payload == nil {
//         utils.WriteJSONError(w, http.StatusBadRequest, "missing Payload")
//         return
//     }
//     cmdRaw, ok := req.Payload["Exec"].(string)
//     if !ok || cmdRaw == "" {
//         utils.WriteJSONError(w, http.StatusBadRequest, "Payload.Exec is missing or invalid")
//         return
//     }
//     cmd := cmdRaw

//     if !h.allowedCmd.MatchString(string([]rune(cmd)[0])) || !h.listAllowedCmd.MatchString(cmd) {
//         utils.WriteJSONError(w, http.StatusBadRequest, "command not allowed")
//         return
//     }

//     // ===== Script check
//     scriptPath := h.comMatch(h.GetVersion(), cmd)
//     if scriptPath == "" {
//         utils.WriteJSONError(w, http.StatusNotFound, "command not found")
//         return
//     }
//     fullPath := filepath.Join(h.cfg.ComDir, scriptPath)
//     if _, err := os.Stat(fullPath); err != nil {
//         utils.WriteJSONError(w, http.StatusNotFound, "command file not found")
//         return
//     }

//     // ===== Run Lua
//     L := lua.NewState()
//     defer L.Close()

//     inTable := L.NewTable()
//     paramsTable := L.NewTable()
//     if params, ok := req.Payload["PassedParameters"].(map[string]interface{}); ok {
//         for k, v := range params {
//             L.SetField(paramsTable, k, utils.ConvertGolangTypesToLua(L, v))
//         }
//     }
//     L.SetField(inTable, "Params", paramsTable)
//     L.SetGlobal("In", inTable)

//     resultTable := L.NewTable()
//     outTable := L.NewTable()
//     L.SetField(outTable, "Result", resultTable)
//     L.SetGlobal("Out", outTable)

//     prepareLua := filepath.Join(h.cfg.ComDir, "_prepare.lua")
//     if _, err := os.Stat(prepareLua); err == nil {
//         if err := L.DoFile(prepareLua); err != nil {
//             utils.WriteJSONError(w, http.StatusInternalServerError, "lua _prepare error: "+err.Error())
//             return
//         }
//     }
//     if err := L.DoFile(fullPath); err != nil {
//         utils.WriteJSONError(w, http.StatusInternalServerError, "lua exec error: "+err.Error())
//         return
//     }

//     lv := L.GetGlobal("Out")
//     tbl, ok := lv.(*lua.LTable)
//     if !ok {
//         utils.WriteJSONError(w, http.StatusInternalServerError, "'Out' is not a table")
//         return
//     }
//     resultVal := tbl.RawGetString("Result")
//     resultTbl, ok := resultVal.(*lua.LTable)
//     if !ok {
//         utils.WriteJSONError(w, http.StatusInternalServerError, "'Result' is not a table")
//         return
//     }

//     out := make(map[string]any)
//     resultTbl.ForEach(func(key lua.LValue, value lua.LValue) {
//         out[key.String()] = utils.ConvertLuaTypesToGolang(value)
//     })

//     uuid32, _ := corestate.GetNodeUUID(filepath.Join(config.MetaDir, "uuid"))

//     resp := PettiResponse{
//         PettiVer:             req.PettiVer,
//         ResponsibleAgentUUID: uuid32,
//         PackageType: struct {
//             AnswerOf string `json:"AnswerOf"`
//         }{AnswerOf: req.PackageType.Request},
//         Payload: map[string]any{
//             "RequestedCommand": cmd,
//             "Response":         out,
//         },
//     }

//     // ===== Final serialization check (package validity)
//     respData, err := json.Marshal(resp)
//     if err != nil {
//         utils.WriteJSONError(w, http.StatusInternalServerError, "internal error: package is invalid")
//         return
//     }

//     w.Header().Set("Content-Type", "application/json")
//     w.WriteHeader(http.StatusOK)
//     if _, err := w.Write(respData); err != nil {
//         h.log.Error("Failed to send JSON", slog.String("err", err.Error()))
//     }

//     // ===== Status logging
//     status, _ := out["status"].(string)
//     switch status {
//     case "ok":
//         h.log.Info("Success", slog.String("cmd", cmd), slog.Any("out", out))
//     case "error":
//         h.log.Warn("Error in command", slog.String("cmd", cmd), slog.Any("out", out))
//     default:
//         h.log.Info("Unknown status", slog.String("cmd", cmd), slog.Any("out", out))
//     }
// }

/*
import (
    "encoding/json"
    "log/slog"
    "net/http"
    "os"
    "path/filepath"

    "github.com/akyaiy/GoSally-mvp/core/config"
    "github.com/akyaiy/GoSally-mvp/core/corestate"
    "github.com/akyaiy/GoSally-mvp/core/utils"
    "github.com/go-chi/chi/v5"
    lua "github.com/yuin/gopher-lua"
)

// HandlerV1 is the main handler for version 1 of the API.
// The function processes the HTTP request and runs Lua scripts,
// preparing the environment and subsequently transmitting the execution result
func (h *HandlerV1) Handle(w http.ResponseWriter, r *http.Request) {
    uuid16, err := utils.NewUUID(int(config.UUIDLength))
    if err != nil {
        h.log.Error("Failed to generate UUID",
            slog.String("error", err.Error()))

        if err := utils.WriteJSONError(w, http.StatusInternalServerError, "failed to generate UUID: "+err.Error()); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }
    log := h.log.With(
        slog.Group("request",
            slog.String("version", h.GetVersion()),
            slog.String("url", r.URL.String()),
            slog.String("method", r.Method),
        ),
        slog.Group("connection",
            slog.String("connection-uuid", uuid16),
            slog.String("remote", r.RemoteAddr),
        ),
    )
    log.Info("Received request")

    cmd := chi.URLParam(r, "cmd")
    if !h.allowedCmd.MatchString(string([]rune(cmd)[0])) || !h.listAllowedCmd.MatchString(cmd) {
        log.Error("HTTP request error",
            slog.String("error", "invalid command"),
            slog.String("cmd", cmd),
            slog.Int("status", http.StatusBadRequest))

        if err := utils.WriteJSONError(w, http.StatusBadRequest, "invalid command"); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    scriptPath := h.comMatch(chi.URLParam(r, "ver"), cmd)
    if scriptPath == "" {
        log.Error("HTTP request error",
            slog.String("error", "command not found"),
            slog.String("cmd", cmd),
            slog.Int("status", http.StatusNotFound))

        if err := utils.WriteJSONError(w, http.StatusNotFound, "command not found"); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    scriptPath = filepath.Join(h.cfg.ComDir, scriptPath)
    if _, err := os.Stat(scriptPath); err != nil {
        log.Error("HTTP request error",
            slog.String("error", "command not found"),
            slog.String("cmd", cmd),
            slog.Int("status", http.StatusNotFound))

        if err := utils.WriteJSONError(w, http.StatusNotFound, "command not found"); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    L := lua.NewState()
    defer L.Close()

    paramsTable := L.NewTable()
    qt := r.URL.Query()
    for k, v := range qt {
        if len(v) > 0 {
            L.SetField(paramsTable, k, lua.LString(v[0]))
        }
    }
    inTable := L.NewTable()
    L.SetField(inTable, "Params", paramsTable)
    L.SetGlobal("In", inTable)

    // Create the Out table with Result
    resultTable := L.NewTable()
    outTable := L.NewTable()
    L.SetField(outTable, "Result", resultTable)
    L.SetGlobal("Out", outTable)

    prepareLuaEnv := filepath.Join(h.cfg.ComDir, "_prepare.lua")
    if _, err := os.Stat(prepareLuaEnv); err == nil {
        if err := L.DoFile(prepareLuaEnv); err != nil {
            log.Error("Failed to prepare lua environment",
                slog.String("error", err.Error()))

            if err := utils.WriteJSONError(w, http.StatusInternalServerError, "lua error: "+err.Error()); err != nil {
                h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
            }
            return
        }
    } else {
        log.Warn("No environment preparation script found, skipping preparation")
    }

    if err := L.DoFile(scriptPath); err != nil {
        log.Error("Failed to execute lua script",
            slog.String("error", err.Error()))
        if err := utils.WriteJSONError(w, http.StatusInternalServerError, "lua error: "+err.Error()); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    lv := L.GetGlobal("Out")
    tbl, ok := lv.(*lua.LTable)
    if !ok {
        log.Error("Lua global 'Out' is not a table")

        if err := utils.WriteJSONError(w, http.StatusInternalServerError, "'Out' is not a table"); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    resultVal := tbl.RawGetString("Result")
    resultTbl, ok := resultVal.(*lua.LTable)
    if !ok {
        log.Error("Lua global 'Result' is not a table")

        if err := utils.WriteJSONError(w, http.StatusInternalServerError, "'Result' is not a table"); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    out := make(map[string]any)
    resultTbl.ForEach(func(key lua.LValue, value lua.LValue) {
        out[key.String()] = utils.ConvertLuaTypesToGolang(value)
    })
    uuid32, _ := corestate.GetNodeUUID(filepath.Join(config.MetaDir, "uuid"))
    response := ResponseFormat{
        ResponsibleAgentUUID: uuid32,
        RequestedCommand:     cmd,
        Response:             out,
    }

    w.Header().Set("Content-Type", "application/json")
    if err := json.NewEncoder(w).Encode(response); err != nil {
        log.Error("Failed to encode JSON response",
            slog.String("error", err.Error()))
    }

    status, _ := out["status"].(string)
    switch status {
    case "error":
        log.Info("Command executed with error",
            slog.String("cmd", cmd),
            slog.Any("result", out))
    case "ok":
        log.Info("Command executed successfully",
            slog.String("cmd", cmd),
            slog.Any("result", out))
    default:
        log.Info("Command executed and returned an unknown status",
            slog.String("cmd", cmd),
            slog.Any("result", out))
    }

    log.Info("Session completed")
}
*/
@@ -1,133 +0,0 @@
package sv1

/*
import (
    "encoding/json"
    "log/slog"
    "net/http"
    "os"
    "path/filepath"
    "strings"

    "github.com/akyaiy/GoSally-mvp/core/config"
    "github.com/akyaiy/GoSally-mvp/core/corestate"
    "github.com/akyaiy/GoSally-mvp/core/utils"
    "github.com/go-chi/chi/v5"
)

// The function processes the HTTP request and returns a list of available commands.
func (h *HandlerV1) HandleList(w http.ResponseWriter, r *http.Request) {
    uuid16, err := utils.NewUUID(int(config.UUIDLength))
    if err != nil {
        h.log.Error("Failed to generate UUID",
            slog.String("error", err.Error()))

        if err := utils.WriteJSONError(w, http.StatusInternalServerError, "failed to generate UUID: "+err.Error()); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }
    log := h.log.With(
        slog.Group("request",
            slog.String("version", h.GetVersion()),
            slog.String("url", r.URL.String()),
            slog.String("method", r.Method),
        ),
        slog.Group("connection",
            slog.String("connection-uuid", uuid16),
            slog.String("remote", r.RemoteAddr),
        ),
    )
    log.Info("Received request")
    type ComMeta struct {
        Description string            `json:"Description"`
        Arguments   map[string]string `json:"Arguments,omitempty"`
    }
    var (
        files         []os.DirEntry
        commands      = make(map[string]ComMeta)
        cmdsProcessed = make(map[string]bool)
    )

    if files, err = os.ReadDir(h.cfg.ComDir); err != nil {
        log.Error("Failed to read commands directory",
            slog.String("error", err.Error()))

        if err := utils.WriteJSONError(w, http.StatusInternalServerError, "failed to read commands directory: "+err.Error()); err != nil {
            h.log.Error("Failed to write JSON", slog.String("err", err.Error()))
        }
        return
    }

    apiVer := chi.URLParam(r, "ver")

    // First, look for version-specific commands
    for _, file := range files {
        if file.IsDir() || filepath.Ext(file.Name()) != ".lua" {
            continue
        }
        cmdFull := file.Name()[:len(file.Name())-4]
        cmdParts := strings.SplitN(cmdFull, "?", 2)
        cmdName := cmdParts[0]

        if !h.allowedCmd.MatchString(string([]rune(cmdName)[0])) {
            continue
        }
        if !h.listAllowedCmd.MatchString(cmdName) {
            continue
        }

        if len(cmdParts) == 2 && cmdParts[1] == apiVer {
            description, _ := h.extractDescriptionStatic(filepath.Join(h.cfg.ComDir, file.Name()))
            if description == "" {
                description = "description missing"
            }
            commands[cmdName] = ComMeta{Description: description}
            cmdsProcessed[cmdName] = true
        }
    }

    // Then the fallbacks
    for _, file := range files {
        if file.IsDir() || filepath.Ext(file.Name()) != ".lua" {
            continue
        }
        cmdFull := file.Name()[:len(file.Name())-4]
        cmdParts := strings.SplitN(cmdFull, "?", 2)
        cmdName := cmdParts[0]

        if !h.allowedCmd.MatchString(string([]rune(cmdName)[0])) {
            continue
        }
        if !h.listAllowedCmd.MatchString(cmdName) {
            continue
        }
        if cmdsProcessed[cmdName] {
            continue
        }
        if len(cmdParts) == 1 {
            description, _ := h.extractDescriptionStatic(filepath.Join(h.cfg.ComDir, file.Name()))
            if description == "" {
                description = "description missing"
            }
            commands[cmdName] = ComMeta{Description: description}
            cmdsProcessed[cmdName] = true
        }
    }

    log.Debug("Command list prepared")

    log.Info("Session completed")
    uuid32, _ := corestate.GetNodeUUID(filepath.Join(config.MetaDir, "uuid"))
    response := ResponseFormat{
        ResponsibleAgentUUID: uuid32,
        RequestedCommand:     "list",
        Response:             commands,
    }
    w.Header().Set("Content-Type", "application/json")
    if err := json.NewEncoder(w).Encode(response); err != nil {
        h.log.Error("Failed to write JSON error response",
            slog.String("error", err.Error()))
    }
}
*/
@@ -1,20 +0,0 @@
package sv1

// PETTI - Go Sally Protocol for Exchanging Technical Tasks and Information

type PettiRequest struct {
    PettiVer    string `json:"PettiVer"`
    PackageType struct {
        Request string `json:"Request"`
    } `json:"PackageType"`
    Payload map[string]any `json:"Payload"`
}

type PettiResponse struct {
    PettiVer    string `json:"PettiVer"`
    PackageType struct {
        AnswerOf string `json:"AnswerOf"`
    } `json:"PackageType"`
    ResponsibleAgentUUID string         `json:"ResponsibleAgentUUID"`
    Payload              map[string]any `json:"Payload"`
}
@@ -1,60 +0,0 @@
package sv1

import (
    "log/slog"
    "os"
)

// func (h *HandlerV1) errNotFound(w http.ResponseWriter, r *http.Request) {
//     utils.WriteJSONError(h.w, http.StatusBadRequest, "invalid request")
//     h.log.Error("HTTP request error",
//         slog.String("remote", h.r.RemoteAddr),
//         slog.String("method", h.r.Method),
//         slog.String("url", h.r.URL.String()),
//         slog.Int("status", http.StatusBadRequest))
// }

// func (h *HandlerV1) extractDescriptionStatic(path string) (string, error) {
//     data, err := os.ReadFile(path)
//     if err != nil {
//         return "", err
//     }

//     re := regexp.MustCompile(`---\s*#description\s*=\s*"([^"]+)"`)
//     m := re.FindStringSubmatch(string(data))
//     if len(m) <= 0 {
//         return "", nil
//     }
//     return m[1], nil
// }

func (h *HandlerV1) comMatch(ver string, comName string) string {
    files, err := os.ReadDir(h.cfg.ComDir)
    if err != nil {
        h.log.Error("Failed to read com dir",
            slog.String("error", err.Error()))
        return ""
    }

    baseName := comName + ".lua"
    verName := comName + "?" + ver + ".lua"

    var baseFileFound string

    for _, f := range files {
        if f.IsDir() {
            continue
        }
        fname := f.Name()

        if fname == verName {
            return fname
        }

        if fname == baseName {
            baseFileFound = fname
        }
    }

    return baseFileFound
}
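A quick illustration, not part of the diff, of the resolution order comMatch implements: a version-qualified script ("name?ver.lua") wins, otherwise the unqualified "name.lua" is used. The directory contents and command names below are hypothetical; the helper just replays the same loop over a plain string slice.

    package main

    import "fmt"

    // resolve mirrors comMatch's lookup over a plain file list:
    // "<name>?<ver>.lua" is preferred, "<name>.lua" is the fallback.
    func resolve(files []string, ver, name string) string {
        base, versioned := name+".lua", name+"?"+ver+".lua"
        found := ""
        for _, f := range files {
            if f == versioned {
                return f
            }
            if f == base {
                found = f
            }
        }
        return found
    }

    func main() {
        files := []string{"status.lua", "status?v1.lua", "reboot.lua"} // hypothetical com_dir contents
        fmt.Println(resolve(files, "v1", "status")) // -> "status?v1.lua"
        fmt.Println(resolve(files, "v2", "status")) // -> "status.lua" (fallback)
        fmt.Println(resolve(files, "v1", "reboot")) // -> "reboot.lua"
    }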
main.go (9 lines)
@@ -1,9 +0,0 @@
package main

import (
    "github.com/akyaiy/GoSally-mvp/cmd"
)

func main() {
    cmd.Execute()
}
src/cmd/root.go (new file, 38 lines)
@@ -0,0 +1,38 @@
// Package cmd is the entry point where all the main hooks and commands are wired together.
// GoSally uses spf13/cobra to organize all the calls.
package cmd

import (
    "fmt"
    "log"
    "os"

    "github.com/akyaiy/GoSally-mvp/src/hooks"
    "github.com/akyaiy/GoSally-mvp/src/internal/colors"
    "github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
    "github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{
    Use:   "node",
    Short: "Go Sally node",
    Long: `
GoSally is an HTTP server that handles JSON-RPC 2.0 requests by calling methods as Lua
scripts in a given directory. For more information, visit: https://gosally.oblat.lv/`,
    Run: func(cmd *cobra.Command, args []string) {
        _ = cmd.Help()
    },
}

// Execute prepares the global log, loads command-line arguments
// and executes rootCmd.Execute().
func Execute() {
    log.SetOutput(os.Stdout)
    log.SetPrefix(colors.SetBrightBlack(fmt.Sprintf("(%s) ", corestate.StageNotReady)))
    log.SetFlags(log.Ldate | log.Ltime)
    hooks.Compositor.LoadCMDLine(rootCmd)
    _ = rootCmd.Execute()
    // if err := rootCmd.Execute(); err != nil {
    //     log.Fatalf("Unexpected error: %s", err.Error())
    // }
}
src/cmd/run.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package cmd

import (
    "github.com/akyaiy/GoSally-mvp/src/hooks"
    "github.com/spf13/cobra"
)

var runCmd = &cobra.Command{
    Use:     "run",
    Aliases: []string{"r"},
    Short:   "Run node normally",
    Long: `
"run" starts the node with settings taken from the configuration file`,
    // hooks.Run is essentially the heart of the program
    Run: hooks.Run,
}

func init() {
    rootCmd.AddCommand(runCmd)
}
src/cmd/version.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package cmd

import (
    "fmt"
    "runtime"

    "github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
    "github.com/akyaiy/GoSally-mvp/src/internal/server/sv1"
    "github.com/spf13/cobra"
)

var verCmd = &cobra.Command{
    Use:     "version",
    Aliases: []string{"ver", "v"},
    Short:   "Return node version",
    Run: func(cmd *cobra.Command, args []string) {
        fmt.Printf("Go Sally version: %s\n", config.NodeVersion)
        fmt.Printf("sv1 version: %s\n", sv1.SV1Version)
        fmt.Printf("Go version: %s\n", runtime.Version())
        fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
    },
}

func init() {
    rootCmd.AddCommand(verCmd)
}
@@ -1,31 +1,42 @@
module github.com/akyaiy/GoSally-mvp
module github.com/akyaiy/GoSally-mvp/src

go 1.24.4

require (
    github.com/go-chi/chi/v5 v5.2.2
    github.com/google/uuid v1.6.0
    github.com/spf13/cobra v1.9.1
    github.com/spf13/viper v1.20.1
    github.com/yuin/gopher-lua v1.1.1
    golang.org/x/crypto v0.40.0
    golang.org/x/net v0.42.0
    gopkg.in/ini.v1 v1.67.0
    gopkg.in/natefinch/lumberjack.v2 v2.2.1
    modernc.org/sqlite v1.38.2
)

require (
    github.com/dustin/go-humanize v1.0.1 // indirect
    github.com/fsnotify/fsnotify v1.9.0 // indirect
    github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
    github.com/golang-jwt/jwt/v5 v5.3.0
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/ncruces/go-strftime v0.1.9 // indirect
    github.com/pelletier/go-toml/v2 v2.2.4 // indirect
    github.com/sagikazarmark/locafero v0.9.0 // indirect
    github.com/sourcegraph/conc v0.3.0 // indirect
    github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
    github.com/sagikazarmark/locafero v0.10.0 // indirect
    github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
    github.com/spf13/afero v1.14.0 // indirect
    github.com/spf13/cast v1.9.2 // indirect
    github.com/spf13/pflag v1.0.7 // indirect
    github.com/subosito/gotenv v1.6.0 // indirect
    go.uber.org/multierr v1.11.0 // indirect
    golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
    golang.org/x/sys v0.34.0 // indirect
    golang.org/x/text v0.27.0 // indirect
    modernc.org/libc v1.66.6 // indirect
    modernc.org/mathutil v1.7.1 // indirect
    modernc.org/memory v1.11.0 // indirect
)

require (
@@ -1,6 +1,8 @@
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -9,36 +11,45 @@ github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-chi/cors v1.2.2 h1:Jmey33TE+b+rB7fT8MUy1u0I4L+NARQlK6LhzKPSyQE=
github.com/go-chi/cors v1.2.2/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/sagikazarmark/locafero v0.10.0 h1:FM8Cv6j2KqIhM2ZK7HZjm4mpj9NBktLgowT1aN9q5Cc=
github.com/sagikazarmark/locafero v0.10.0/go.mod h1:Ieo3EUsjifvQu4NZwV5sPd4dwvu0OCgEQV7vjc9yDjw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE=
github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -50,14 +61,23 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -67,3 +87,29 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.26.3 h1:yEN8dzrkRFnn4PUUKXLYIqVf2PJYAEjMTFjO3BDGc3I=
modernc.org/cc/v4 v4.26.3/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM=
modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
modernc.org/libc v1.66.6 h1:RyQpwAhM/19nXD8y3iejM/AjmKwY2TjxZTlUWTsWw2U=
modernc.org/libc v1.66.6/go.mod h1:j8z0EYAuumoMQ3+cWXtmw6m+LYn3qm8dcZDFtFTSq+M=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
src/hooks/initial.go (new file, 420 lines)
@@ -0,0 +1,420 @@
package hooks

import (
    "bufio"
    "context"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "log"
    "log/slog"
    "os"
    "os/signal"
    "path/filepath"
    "reflect"
    "slices"
    "strings"
    "syscall"
    "time"

    "github.com/akyaiy/GoSally-mvp/src/internal/colors"
    "github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
    "github.com/akyaiy/GoSally-mvp/src/internal/core/run_manager"
    "github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
    "github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
    "github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
    "github.com/akyaiy/GoSally-mvp/src/internal/engine/logs"
    "gopkg.in/ini.v1"
)

// The config compositor needs to be in the global scope
var Compositor *config.Compositor = config.NewCompositor()

func InitGlobalLoggerHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    x.Config = Compositor
    x.Log.SetOutput(os.Stdout)
    x.Log.SetPrefix(colors.SetBrightBlack(fmt.Sprintf("(%s) ", cs.Stage)))
    x.Log.SetFlags(log.Ldate | log.Ltime)
}

// First stage: pre-init
func InitCorestateHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    *cs = *corestate.NewCorestate(&corestate.CoreState{
        UUID32DirName:      "uuid",
        NodeBinName:        filepath.Base(os.Args[0]),
        NodeVersion:        config.NodeVersion,
        MetaDir:            "./.meta",
        Stage:              corestate.StagePreInit,
        StartTimestampUnix: time.Now().Unix(),
    })
}

func InitConfigLoadHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    x.Log.SetPrefix(colors.SetYellow(fmt.Sprintf("(%s) ", cs.Stage)))

    if err := x.Config.LoadEnv(); err != nil {
        x.Log.Fatalf("env load error: %s", err)
    }
    cs.NodePath = *x.Config.Env.NodePath

    if cfgPath := x.Config.CMDLine.Run.ConfigPath; cfgPath != "" {
        x.Config.Env.ConfigPath = &cfgPath
    }
    if err := x.Config.LoadConf(*x.Config.Env.ConfigPath); err != nil {
        x.Log.Fatalf("conf load error: %s", err)
    }
}

// The hook reads or prepares a persistent uuid for the node
func InitUUIDHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    uuid32, err := corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
    if errors.Is(err, fs.ErrNotExist) {
        if err := corestate.SetNodeUUID(filepath.Join(cs.NodePath, cs.MetaDir, cs.UUID32DirName)); err != nil {
            x.Log.Fatalf("Cannot generate node uuid: %s", err.Error())
        }
        uuid32, err = corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
        if err != nil {
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
    }
    if err != nil {
        x.Log.Fatalf("uuid load error: %s", err)
    }
    cs.UUID32 = uuid32
    corestate.NODE_UUID = uuid32
}

// The hook is responsible for checking the initialization stage
// and restarting in some cases
func InitRuntimeHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    if *x.Config.Env.ParentStagePID != os.Getpid() {
        // still pre-init stage
        runDir, err := run_manager.Create(cs.UUID32)
        if err != nil {
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
        cs.RunDir = runDir
        input, err := os.Open(os.Args[0])
        if err != nil {
            _ = run_manager.Clean()
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
        if err := run_manager.Set(cs.NodeBinName); err != nil {
            _ = run_manager.Clean()
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
        fmgr := run_manager.File(cs.NodeBinName)
        output, err := fmgr.Open()
        if err != nil {
            _ = run_manager.Clean()
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }

        if _, err := io.Copy(output, input); err != nil {
            fmgr.Close()
            _ = run_manager.Clean()
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
        if err := os.Chmod(filepath.Join(cs.RunDir, cs.NodeBinName), 0755); err != nil {
            fmgr.Close()
            _ = run_manager.Clean()
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
        input.Close()
        fmgr.Close()
        runArgs := os.Args
        runArgs[0] = filepath.Join(cs.RunDir, cs.NodeBinName)

        // prepare environ
        env := utils.SetEviron(os.Environ(), fmt.Sprintf("GS_PARENT_PID=%d", os.Getpid()))

        if err := syscall.Exec(runArgs[0], runArgs, env); err != nil {
            _ = run_manager.Clean()
            x.Log.Fatalf("Unexpected failure: %s", err.Error())
        }
    }
    x.Log.Printf("Node uuid is %s", cs.UUID32)
}

// post-init stage
// The hook creates a run.lock file, which contains information
// about the process and the node, in the runtime directory.
func InitRunlockHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    NodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
        x.Log.Println("Cleaning up...")

        if err := run_manager.Clean(); err != nil {
            x.Log.Printf("%s: Cleanup error: %s", colors.PrintError(), err.Error())
        }
        x.Log.Println("bye!")
    })

    cs.Stage = corestate.StagePostInit
    x.Log.SetPrefix(colors.SetBlue(fmt.Sprintf("(%s) ", cs.Stage)))

    cs.RunDir = run_manager.Toggle()
    exist, err := utils.ExistsMatchingDirs(filepath.Join(os.TempDir(), fmt.Sprintf("/*-%s-%s", cs.UUID32, "gosally-runtime")), cs.RunDir)
    if err != nil {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unexpected failure: %s", err.Error())
    }
    if exist {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unable to continue node operation: A node with the same identifier was found in the runtime environment")
    }

    if err := run_manager.Set("run.lock"); err != nil {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unexpected failure: %s", err.Error())
    }
    lockPath, err := run_manager.Get("run.lock")
    if err != nil {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unexpected failure: %s", err.Error())
    }
    lockFile := ini.Empty()
    secRun, err := lockFile.NewSection("runtime")
    if err != nil {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unexpected failure: %s", err.Error())
    }
    secRun.Key("pid").SetValue(fmt.Sprintf("%d/%d", os.Getpid(), x.Config.Env.ParentStagePID))
    secRun.Key("version").SetValue(cs.NodeVersion)
    secRun.Key("uuid").SetValue(cs.UUID32)
    secRun.Key("timestamp").SetValue(time.Unix(cs.StartTimestampUnix, 0).Format("2006-01-02/15:04:05 MST"))
    secRun.Key("timestamp-unix").SetValue(fmt.Sprintf("%d", cs.StartTimestampUnix))

    err = lockFile.SaveTo(lockPath)
    if err != nil {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unexpected failure: %s", err.Error())
    }
}

// The hook reads the configuration and replaces special expressions
// (%tmp% and so on) in string fields with the required data.
func InitConfigReplHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    if !slices.Contains(*x.Config.Conf.DisableWarnings, "--WNonStdTmpDir") && os.TempDir() != "/tmp" {
        x.Log.Printf("%s: %s", colors.PrintWarn(), "Non-standard value specified for temporary directory")
    }

    replacements := map[string]any{
        "%tmp%":    filepath.Clean(run_manager.RuntimeDir()),
        "%path%":   *x.Config.Env.NodePath,
        "%stdout%": "_1STDout",
        "%stderr%": "_2STDerr",
        "%1%":      "_1STDout",
        "%2%":      "_2STDerr",
    }

    processConfig(&x.Config.Conf, replacements)

    if !slices.Contains(logs.Levels.Available, *x.Config.Conf.Log.Level) {
        if !slices.Contains(*x.Config.Conf.DisableWarnings, "--WUndefLogLevel") {
            x.Log.Printf("%s: %s", colors.PrintWarn(), fmt.Sprintf("Unknown logging level %s, fallback level: %s", *x.Config.Conf.Log.Level, logs.Levels.Fallback))
        }
        x.Config.Conf.Log.Level = &logs.Levels.Fallback
    }
}

// The hook is responsible for outputting the
// final config and asking for confirmation.
func InitConfigPrintHook(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
    if *x.Config.Conf.Node.ShowConfig {
        fmt.Printf("Configuration from %s:\n", x.Config.CMDLine.Run.ConfigPath)
        x.Config.Print(x.Config.Conf)

        fmt.Printf("Environment:\n")
        x.Config.Print(x.Config.Env)

        if cs.UUID32 != "" && !askConfirm("Is that ok?", true) {
            x.Log.Printf("Cancel launch")
            NodeApp.CallFallback(ctx)
        }
    }

    if *x.Config.Conf.Node.Name == "noname" {
        x.Log.Printf("Starting node")
    } else {
        x.Log.Printf("Starting \"%s\" node", *x.Config.Conf.Node.Name)
    }
}

func InitSLogHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
    cs.Stage = corestate.StageReady
    x.Log.SetPrefix(colors.SetGreen(fmt.Sprintf("(%s) ", cs.Stage)))

    x.SLog = new(slog.Logger)
    newSlog, err := logs.SetupLogger(x.Config.Conf.Log)
    if err != nil {
        _ = run_manager.Clean()
        x.Log.Fatalf("Unexpected failure: %s", err.Error())
    }
    *x.SLog = *newSlog
}

// The method goes through the entire config structure through
// reflection and replaces string fields with the required ones.
func processConfig(conf any, replacements map[string]any) error {
    val := reflect.ValueOf(conf)
    if val.Kind() == reflect.Ptr {
        val = val.Elem()
    }

    switch val.Kind() {
    case reflect.Struct:
        for i := 0; i < val.NumField(); i++ {
            field := val.Field(i)
            if field.CanAddr() && field.CanSet() {
                if err := processConfig(field.Addr().Interface(), replacements); err != nil {
                    return err
                }
            }
        }

    case reflect.Slice:
        for i := 0; i < val.Len(); i++ {
            elem := val.Index(i)
            if elem.CanAddr() && elem.CanSet() {
                if err := processConfig(elem.Addr().Interface(), replacements); err != nil {
                    return err
                }
            }
        }

    case reflect.Map:
        for _, key := range val.MapKeys() {
            elem := val.MapIndex(key)
            if elem.CanInterface() {
                newVal := reflect.New(elem.Type()).Elem()
                newVal.Set(elem)

                if err := processConfig(newVal.Addr().Interface(), replacements); err != nil {
                    return err
                }

                val.SetMapIndex(key, newVal)
            }
        }

    case reflect.String:
        str := val.String()

        if replacement, exists := replacements[str]; exists {
            if err := setValue(val, replacement); err != nil {
                return fmt.Errorf("failed to set %q: %v", str, err)
            }
        } else {
            for placeholder, replacement := range replacements {
                if strings.Contains(str, placeholder) {
                    replacementStr, err := toString(replacement)
                    if err != nil {
                        return fmt.Errorf("invalid replacement for %q: %v", placeholder, err)
                    }
                    newStr := strings.ReplaceAll(str, placeholder, replacementStr)
                    val.SetString(newStr)
                }
            }
        }

    case reflect.Ptr:
        if !val.IsNil() {
            elem := val.Elem()
            if elem.Kind() == reflect.String {
                str := elem.String()
                if replacement, exists := replacements[str]; exists {
                    strVal, err := toString(replacement)
                    if err != nil {
                        return fmt.Errorf("cannot convert replacement to string: %v", err)
                    }
                    elem.SetString(strVal)
                } else {
                    for placeholder, replacement := range replacements {
                        if strings.Contains(str, placeholder) {
                            replacementStr, err := toString(replacement)
                            if err != nil {
                                return fmt.Errorf("invalid replacement for %q: %v", placeholder, err)
                            }
                            newStr := strings.ReplaceAll(str, placeholder, replacementStr)
                            elem.SetString(newStr)
                        }
                    }
                }
            } else {
                return processConfig(elem.Addr().Interface(), replacements)
            }
        }
    }
    return nil
}

func setValue(val reflect.Value, replacement any) error {
    if !val.CanSet() {
        return fmt.Errorf("value is not settable")
    }

    replacementVal := reflect.ValueOf(replacement)
    if replacementVal.Type().AssignableTo(val.Type()) {
        val.Set(replacementVal)
        return nil
    }

    if val.Kind() == reflect.String {
        str, err := toString(replacement)
        if err != nil {
            return fmt.Errorf("cannot convert replacement to string: %v", err)
        }
        val.SetString(str)
        return nil
    }

    return fmt.Errorf("type mismatch: cannot assign %T to %v", replacement, val.Type())
}

func toString(v any) (string, error) {
    switch s := v.(type) {
    case string:
        return s, nil
    case fmt.Stringer:
        return s.String(), nil
    default:
        return fmt.Sprint(v), nil
    }
}

func askConfirm(prompt string, defaultYes bool) bool {
    ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)

    fmt.Print(prompt)
    if defaultYes {
        fmt.Printf(" (%s/%s): ", colors.SetBrightGreen("Y"), colors.SetBrightRed("n"))
    } else {
        fmt.Printf(" (%s/%s): ", colors.SetBrightGreen("n"), colors.SetBrightRed("Y"))
    }

    inputChan := make(chan string, 1)

    go func() {
        reader := bufio.NewReader(os.Stdin)
        text, _ := reader.ReadString('\n')
        inputChan <- text
    }()

    select {
    case <-ctx.Done():
        fmt.Println("")
        NodeApp.CallFallback(ctx)
        os.Exit(3)
    case text := <-inputChan:
        text = strings.TrimSpace(strings.ToLower(text))
        if text == "" {
            return defaultYes
        }
        if text == "y" || text == "yes" {
            return true
        }
        return false
    }
    return defaultYes
}
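An illustrative, self-contained stand-in (not from the diff) for the placeholder substitution that processConfig performs above. Only the %tmp% and %path% placeholder names come from the hook; the struct, values and the simplified helper are hypothetical and cover just the string-field case.

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    type demoConf struct {
        OutPath string
        ComDir  string
    }

    // substitute walks the struct's string fields and expands placeholders,
    // a much-reduced version of what processConfig does recursively.
    func substitute(conf any, repl map[string]string) {
        v := reflect.ValueOf(conf).Elem()
        for i := 0; i < v.NumField(); i++ {
            f := v.Field(i)
            if f.Kind() != reflect.String || !f.CanSet() {
                continue
            }
            s := f.String()
            for placeholder, value := range repl {
                s = strings.ReplaceAll(s, placeholder, value)
            }
            f.SetString(s)
        }
    }

    func main() {
        c := demoConf{OutPath: "%tmp%/node.log", ComDir: "%path%/com"}
        substitute(&c, map[string]string{
            "%tmp%":  "/tmp/abc-gosally-runtime", // hypothetical runtime dir
            "%path%": "/opt/gosally",             // hypothetical node path
        })
        fmt.Printf("%+v\n", c) // {OutPath:/tmp/abc-gosally-runtime/node.log ComDir:/opt/gosally/com}
    }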
185
src/hooks/run.go
Normal file
185
src/hooks/run.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package hooks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/run_manager"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/update"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/logs"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/gateway"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/sv1"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/sv2"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/cors"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/netutil"
|
||||
)
|
||||
|
||||
var NodeApp = app.New()
|
||||
var AllowedCmdPattern = `^[a-zA-Z0-9]+(\.[a-zA-Z0-9]+)*$`

func Run(cmd *cobra.Command, args []string) {
	NodeApp.InitialHooks(
		InitGlobalLoggerHook, InitCorestateHook, InitConfigLoadHook,
		InitUUIDHook, InitRuntimeHook, InitRunlockHook,
		InitConfigReplHook, InitConfigPrintHook, InitSLogHook,
	)

	NodeApp.Run(RunHook)
}

func RunHook(ctx context.Context, cs *corestate.CoreState, x *app.AppX) error {
	ctxMain, cancelMain := context.WithCancel(ctx)
	runLockFile := run_manager.File("run.lock")
	_, err := runLockFile.Open()
	if err != nil {
		x.Log.Fatalf("cannot open run.lock: %s", err)
	}

	_, err = runLockFile.Watch(ctxMain, func() {
		x.Log.Printf("run.lock was touched")
		_ = run_manager.Clean()
		cancelMain()
	})
	if err != nil {
		x.Log.Printf("watch error: %s", err)
	}

	serverv1 := sv1.InitV1Server(&sv1.HandlerV1InitStruct{
		X:          x,
		CS:         cs,
		AllowedCmd: regexp.MustCompile(AllowedCmdPattern),
		Ver:        "v1",
	})

	sv2 := sv2.InitServer(&sv2.HandlerInitStruct{
		X:          x,
		CS:         cs,
		AllowedCmd: regexp.MustCompile(AllowedCmdPattern),
		Ver:        "v2",
	})

	session_manager := session.New(*x.Config.Conf.HTTPServer.SessionTTL)

	s := gateway.InitGateway(&gateway.GatewayServerInit{
		SM: session_manager,
		CS: cs,
		X:  x,
	}, serverv1, sv2)

	r := chi.NewRouter()
	r.Use(cors.Handler(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowedMethods:   []string{"POST"},
		AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token", "X-Session-UUID"},
		AllowCredentials: true,
		MaxAge:           300,
	}))
	r.HandleFunc(config.ComDirRoute, s.Handle)
	r.Route("/favicon.ico", func(r chi.Router) {
		r.Get("/", func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent)
		})
	})

	srv := &http.Server{
		Addr:    *x.Config.Conf.HTTPServer.Address,
		Handler: r,
		ErrorLog: log.New(&logs.SlogWriter{
			Logger: x.SLog,
			Level:  slog.LevelError,
		}, "", 0),
	}

	NodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
		if err := srv.Shutdown(ctxMain); err != nil {
			x.Log.Printf("%s: Failed to stop the server gracefully: %s", colors.PrintError(), err.Error())
		} else {
			x.Log.Printf("Server stopped gracefully")
		}

		x.Log.Println("Cleaning up...")

		if err := run_manager.Clean(); err != nil {
			x.Log.Printf("%s: Cleanup error: %s", colors.PrintError(), err.Error())
		}
		x.Log.Println("bye!")
	})

	go func() {
		defer utils.CatchPanicWithCancel(cancelMain)
		if *x.Config.Conf.TLS.TlsEnabled {
			listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port))
			if err != nil {
				x.Log.Printf("%s: Failed to start TLS listener: %s", colors.PrintError(), err.Error())
				cancelMain()
				return
			}
			x.Log.Printf("Serving on %s port %s with TLS... (https://%s%s)", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
			limitedListener := netutil.LimitListener(listener, 100)
			if err := srv.ServeTLS(limitedListener, *x.Config.Conf.TLS.CertFile, *x.Config.Conf.TLS.KeyFile); err != nil && !errors.Is(err, http.ErrServerClosed) {
				x.Log.Printf("%s: Failed to start HTTPS server: %s", colors.PrintError(), err.Error())
				cancelMain()
			}
		} else {
			x.Log.Printf("Serving on %s port %s... (http://%s%s)", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
			listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port))
			if err != nil {
				x.Log.Printf("%s: Failed to start listener: %s", colors.PrintError(), err.Error())
				cancelMain()
				return
			}
			limitedListener := netutil.LimitListener(listener, 100)
			if err := srv.Serve(limitedListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
				x.Log.Printf("%s: Failed to start HTTP server: %s", colors.PrintError(), err.Error())
				cancelMain()
			}
		}
	}()

	session_manager.StartCleanup(5 * time.Second)

	if *x.Config.Conf.Updates.UpdatesEnabled {
		go func() {
			defer utils.CatchPanicWithCancel(cancelMain)
			updated := update.NewUpdater(&update.UpdaterInit{
				X:      x,
				Ctx:    ctxMain,
				Cancel: cancelMain,
			})
			updated.Shutdownfunc(cancelMain)
			for {
				isNewUpdate, err := updated.CkeckUpdates()
				if err != nil {
					x.Log.Printf("Failed to check for updates: %s", err.Error())
				}
				if isNewUpdate {
					if err := updated.Update(); err != nil {
						x.Log.Printf("Failed to update: %s", err.Error())
					} else {
						x.Log.Printf("Update completed successfully")
					}
				}
				time.Sleep(*x.Config.Conf.Updates.CheckInterval)
			}
		}()
	}

	<-ctxMain.Done()
	NodeApp.CallFallback(ctx)
	return nil
}
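A note on the shutdown path above: the fallback hands ctxMain to srv.Shutdown, but by the time the fallback runs ctxMain has usually already been cancelled, so the server gets little or no drain window. A common alternative, sketched here purely as an illustration (the 5-second timeout and the context.Background base are assumptions, not part of this changeset), is to derive a fresh bounded context for the drain:

	// Sketch only: give in-flight requests a bounded drain window instead of
	// an already-cancelled context. The timeout value is an arbitrary example.
	NodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := srv.Shutdown(shutdownCtx); err != nil {
			x.Log.Printf("Failed to stop the server gracefully: %s", err.Error())
		}
	})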
@@ -1,4 +1,4 @@
package logs
package colors

import "fmt"

@@ -1,5 +1,7 @@
|
||||
package corestate
|
||||
|
||||
var NODE_UUID string
|
||||
|
||||
type Stage string
|
||||
|
||||
const (
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
|
||||
)
|
||||
|
||||
// GetNodeUUID outputs the correct uuid from the file at the path specified in the arguments.
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
|
||||
)
|
||||
|
||||
type RunManagerContract interface {
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -14,9 +13,10 @@ import (
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/run_manager"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@@ -38,20 +38,23 @@ type UpdaterContract interface {
|
||||
}
|
||||
|
||||
type Updater struct {
|
||||
log *log.Logger
|
||||
config *config.Conf
|
||||
env *config.Env
|
||||
x *app.AppX
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func NewUpdater(ctx context.Context, log *log.Logger, cfg *config.Conf, env *config.Env) *Updater {
|
||||
type UpdaterInit struct {
|
||||
X *app.AppX
|
||||
Ctx context.Context
|
||||
Cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func NewUpdater(o *UpdaterInit) *Updater {
|
||||
return &Updater{
|
||||
log: log,
|
||||
config: cfg,
|
||||
env: env,
|
||||
ctx: ctx,
|
||||
x: o.X,
|
||||
ctx: o.Ctx,
|
||||
cancel: o.Cancel,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,7 +122,7 @@ func isVersionNewer(current, latest Version) bool {
|
||||
func (u *Updater) GetCurrentVersion() (Version, Branch, error) {
|
||||
version, branch, err := splitVersionString(string(config.NodeVersion))
|
||||
if err != nil {
|
||||
u.log.Printf("Failed to parse version string: %s", err.Error())
|
||||
u.x.Log.Printf("Failed to parse version string: %s", err.Error())
|
||||
return "", "", err
|
||||
}
|
||||
switch branch {
|
||||
@@ -131,28 +134,28 @@ func (u *Updater) GetCurrentVersion() (Version, Branch, error) {
|
||||
}
|
||||
|
||||
func (u *Updater) GetLatestVersion(updateBranch Branch) (Version, Branch, error) {
|
||||
repoURL := u.config.Updates.RepositoryURL
|
||||
repoURL := *u.x.Config.Conf.Updates.RepositoryURL
|
||||
if repoURL == "" {
|
||||
u.log.Printf("Failed to get latest version: %s", "RepositoryURL is empty in config")
|
||||
u.x.Log.Printf("Failed to get latest version: %s", "RepositoryURL is empty in config")
|
||||
return "", "", errors.New("repository URL is empty")
|
||||
}
|
||||
if !strings.HasPrefix(repoURL, "http://") && !strings.HasPrefix(repoURL, "https://") {
|
||||
u.log.Printf("Failed to get latest version: %s: %s", "RepositoryURL does not start with http:// or https:/", repoURL)
|
||||
u.x.Log.Printf("Failed to get latest version: %s: %s", "RepositoryURL does not start with http:// or https:/", repoURL)
|
||||
return "", "", errors.New("repository URL must start with http:// or https://")
|
||||
}
|
||||
response, err := http.Get(repoURL + "/" + config.ActualFileName)
|
||||
if err != nil {
|
||||
u.log.Printf("Failed to fetch latest version: %s", err.Error())
|
||||
u.x.Log.Printf("Failed to fetch latest version: %s", err.Error())
|
||||
return "", "", err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
u.log.Printf("Failed to fetch latest version: HTTP status %d", response.StatusCode)
|
||||
u.x.Log.Printf("Failed to fetch latest version: HTTP status %d", response.StatusCode)
|
||||
return "", "", errors.New("failed to fetch latest version, status code: " + http.StatusText(response.StatusCode))
|
||||
}
|
||||
data, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
u.log.Printf("Failed to read latest version response: %s", err.Error())
|
||||
u.x.Log.Printf("Failed to read latest version response: %s", err.Error())
|
||||
return "", "", err
|
||||
}
|
||||
lines := strings.Split(string(data), "\n")
|
||||
@@ -163,7 +166,7 @@ func (u *Updater) GetLatestVersion(updateBranch Branch) (Version, Branch, error)
|
||||
}
|
||||
version, branch, err := splitVersionString(string(line))
|
||||
if err != nil {
|
||||
u.log.Printf("Failed to parse version string: %s", err.Error())
|
||||
u.x.Log.Printf("Failed to parse version string: %s", err.Error())
|
||||
return "", "", err
|
||||
}
|
||||
if branch == updateBranch {
|
||||
@@ -189,7 +192,7 @@ func (u *Updater) CkeckUpdates() (IsNewUpdate, error) {
|
||||
}
|
||||
|
||||
func (u *Updater) Update() error {
|
||||
if !u.config.Updates.UpdatesEnabled {
|
||||
if !*u.x.Config.Conf.Updates.UpdatesEnabled {
|
||||
return errors.New("updates are disabled in config, skipping update")
|
||||
}
|
||||
|
||||
@@ -209,7 +212,7 @@ func (u *Updater) Update() error {
|
||||
}
|
||||
|
||||
updateArchiveName := fmt.Sprintf("%s.v%s-%s", config.UpdateArchiveName, latestVersion, latestBranch)
|
||||
updateDest := fmt.Sprintf("%s/%s.%s", u.config.Updates.RepositoryURL, updateArchiveName, "tar.gz")
|
||||
updateDest := fmt.Sprintf("%s/%s.%s", *u.x.Config.Conf.Updates.RepositoryURL, updateArchiveName, "tar.gz")
|
||||
|
||||
resp, err := http.Get(updateDest)
|
||||
if err != nil {
|
||||
@@ -275,7 +278,7 @@ func (u *Updater) Update() error {
|
||||
|
||||
func (u *Updater) InstallAndRestart() error {
|
||||
|
||||
nodePath := u.env.NodePath
|
||||
nodePath := *u.x.Config.Env.NodePath
|
||||
if nodePath == "" {
|
||||
return errors.New("GS_NODE_PATH environment variable is not set")
|
||||
}
|
||||
@@ -303,12 +306,7 @@ func (u *Updater) InstallAndRestart() error {
|
||||
return fmt.Errorf("failed to chmod: %w", err)
|
||||
}
|
||||
|
||||
u.log.Printf("Launching new version: path is %s", targetPath)
|
||||
// cmd := exec.Command(targetPath, os.Args[1:]...)
|
||||
// cmd.Env = os.Environ()
|
||||
// cmd.Stdout = os.Stdout
|
||||
// cmd.Stderr = os.Stderr
|
||||
// cmd.Stdin = os.Stdin
|
||||
u.x.Log.Printf("Launching new version: path is %s", targetPath)
|
||||
args := os.Args
|
||||
args[0] = targetPath
|
||||
env := utils.SetEviron(os.Environ(), "GS_PARENT_PID=-1")
|
||||
@@ -317,17 +315,6 @@ func (u *Updater) InstallAndRestart() error {
|
||||
return err
|
||||
}
|
||||
return syscall.Exec(targetPath, args, env)
|
||||
//u.cancel()
|
||||
|
||||
// TODO: fix this crap and find a better way to update without errors
|
||||
// for {
|
||||
// _, err := run_manager.Get("run.lock")
|
||||
// if err != nil {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
|
||||
// return cmd.Start()
|
||||
}
|
||||
|
||||
func (u *Updater) Shutdownfunc(f context.CancelFunc) {
34
src/internal/core/utils/panic.go
Normal file
@@ -0,0 +1,34 @@
package utils

import (
	"log"
	"runtime"

	"golang.org/x/net/context"
)

func CatchPanic() {
	if err := recover(); err != nil {
		stack := make([]byte, 8096)
		stack = stack[:runtime.Stack(stack, false)]
		log.Printf("recovered panic:\n%s", stack)
	}
}

func CatchPanicWithCancel(cancel context.CancelFunc) {
	if err := recover(); err != nil {
		stack := make([]byte, 8096)
		stack = stack[:runtime.Stack(stack, false)]
		log.Printf("recovered panic:\n%s", stack)
		cancel()
	}
}

func CatchPanicWithFallback(onPanic func(any)) {
	if err := recover(); err != nil {
		stack := make([]byte, 8096)
		stack = stack[:runtime.Stack(stack, false)]
		log.Printf("recovered panic:\n%s", stack)
		onPanic(err)
	}
}
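These helpers are meant to be installed with defer at the top of a goroutine. A minimal usage sketch (the worker function is a hypothetical placeholder, not part of the repository):

	// Sketch: protect a background goroutine so a panic cancels the shared
	// context instead of crashing the whole node.
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		defer utils.CatchPanicWithCancel(cancel) // recovers, logs the stack, cancels ctx

		doBackgroundWork(ctx) // hypothetical worker; may panic
	}()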
9
src/internal/core/utils/safe_fetch.go
Normal file
@@ -0,0 +1,9 @@
package utils

// SafeFetch safely fetches a value from a pointer. If v is nil, the fallback value is returned.
func SafeFetch[T any](v *T, fallback T) T {
	if v == nil {
		return fallback
	}
	return *v
}
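Since the config structs in this changeset expose every field as a pointer, SafeFetch gives a nil-safe way to read them with a default. A small sketch (cfg is assumed to be an already-loaded *config.Conf with a non-nil HTTPServer block):

	// Sketch: read optional pointer fields without nil checks at every call site.
	port := utils.SafeFetch(cfg.HTTPServer.Port, "8080")             // falls back to the documented default
	ttl := utils.SafeFetch(cfg.HTTPServer.SessionTTL, 30*time.Minute)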
@@ -5,7 +5,7 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
|
||||
)
|
||||
|
||||
func NewUUIDRaw(length int) ([]byte, error) {
95
src/internal/engine/app/app.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
|
||||
)
|
||||
|
||||
type AppContract interface {
|
||||
InitialHooks(fn ...func(ctx context.Context, cs *corestate.CoreState, x *AppX))
|
||||
Run(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX) error)
|
||||
Fallback(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX))
|
||||
|
||||
CallFallback(ctx context.Context)
|
||||
}
|
||||
|
||||
type App struct {
|
||||
initHooks []func(ctx context.Context, cs *corestate.CoreState, x *AppX)
|
||||
runHook func(ctx context.Context, cs *corestate.CoreState, x *AppX) error
|
||||
fallback func(ctx context.Context, cs *corestate.CoreState, x *AppX)
|
||||
|
||||
Corestate *corestate.CoreState
|
||||
AppX *AppX
|
||||
|
||||
fallbackOnce sync.Once
|
||||
}
|
||||
|
||||
type AppX struct {
|
||||
Config *config.Compositor
|
||||
Log *log.Logger
|
||||
SLog *slog.Logger
|
||||
}
|
||||
|
||||
func New() AppContract {
|
||||
return &App{
|
||||
AppX: &AppX{
|
||||
Log: log.Default(),
|
||||
},
|
||||
Corestate: &corestate.CoreState{},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) InitialHooks(fn ...func(ctx context.Context, cs *corestate.CoreState, x *AppX)) {
|
||||
a.initHooks = append(a.initHooks, fn...)
|
||||
}
|
||||
|
||||
func (a *App) Fallback(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX)) {
|
||||
a.fallback = fn
|
||||
}
|
||||
|
||||
func (a *App) Run(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX) error) {
|
||||
a.runHook = fn
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
defer stop()
|
||||
|
||||
for _, hook := range a.initHooks {
|
||||
hook(ctx, a.Corestate, a.AppX)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
a.AppX.Log.Printf("PANIC recovered: %v", r)
|
||||
if a.fallback != nil {
|
||||
a.fallback(ctx, a.Corestate, a.AppX)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
var runErr error
|
||||
if a.runHook != nil {
|
||||
runErr = a.runHook(ctx, a.Corestate, a.AppX)
|
||||
}
|
||||
|
||||
if runErr != nil {
|
||||
a.AppX.Log.Fatalf("fatal in Run: %v", runErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) CallFallback(ctx context.Context) {
|
||||
a.fallbackOnce.Do(func() {
|
||||
if a.fallback != nil {
|
||||
a.fallback(ctx, a.Corestate, a.AppX)
|
||||
}
|
||||
os.Exit(0)
|
||||
})
|
||||
}
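For reference, the contract above is exercised by the cmd code elsewhere in this changeset roughly like the following sketch (the hook names here are illustrative placeholders, not symbols from the diff):

	// Sketch: typical lifecycle wiring against AppContract.
	var NodeApp = app.New()

	func main() {
		NodeApp.InitialHooks(initConfigHook, initLoggerHook) // hypothetical hooks
		NodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
			x.Log.Println("cleaning up before exit")
		})
		NodeApp.Run(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) error {
			<-ctx.Done() // block until a termination signal cancels the context
			NodeApp.CallFallback(ctx)
			return nil
		})
	}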
|
||||
@@ -43,10 +43,13 @@ func (c *Compositor) LoadConf(path string) error {
|
||||
v.SetConfigType("yaml")
|
||||
|
||||
// defaults
|
||||
v.SetDefault("mode", "dev")
|
||||
v.SetDefault("com_dir", "./com/")
|
||||
v.SetDefault("node.name", "noname")
|
||||
v.SetDefault("node.mode", "dev")
|
||||
v.SetDefault("node.show_config", "false")
|
||||
v.SetDefault("node.com_dir", "./com/")
|
||||
v.SetDefault("http_server.address", "0.0.0.0")
|
||||
v.SetDefault("http_server.port", "8080")
|
||||
v.SetDefault("http_server.session_ttl", "30m")
|
||||
v.SetDefault("http_server.timeout", "5s")
|
||||
v.SetDefault("http_server.idle_timeout", "60s")
|
||||
v.SetDefault("tls.enabled", false)
|
||||
@@ -55,8 +58,10 @@ func (c *Compositor) LoadConf(path string) error {
|
||||
v.SetDefault("updates.enabled", false)
|
||||
v.SetDefault("updates.check_interval", "2h")
|
||||
v.SetDefault("updates.wanted_version", "latest-stable")
|
||||
v.SetDefault("log.json_format", "false")
|
||||
v.SetDefault("log.level", "info")
|
||||
v.SetDefault("log.out_path", "")
|
||||
v.SetDefault("log.output", "%2%")
|
||||
v.SetDefault("disable_warnings", []string{})
|
||||
|
||||
if err := v.ReadInConfig(); err != nil {
|
||||
return fmt.Errorf("error reading config: %w", err)
82
src/internal/engine/config/config.go
Normal file
@@ -0,0 +1,82 @@
|
||||
// Package config provides configuration management for the application.
|
||||
// config is built on top of the third-party module cleanenv
|
||||
package config
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type CompositorContract interface {
|
||||
LoadEnv() error
|
||||
LoadConf(path string) error
|
||||
}
|
||||
|
||||
type Compositor struct {
|
||||
CMDLine *CMDLine
|
||||
Conf *Conf
|
||||
Env *Env
|
||||
}
|
||||
|
||||
type Conf struct {
|
||||
Node *Node `mapstructure:"node"`
|
||||
HTTPServer *HTTPServer `mapstructure:"http_server"`
|
||||
TLS *TLS `mapstructure:"tls"`
|
||||
Updates *Updates `mapstructure:"updates"`
|
||||
Log *Log `mapstructure:"log"`
|
||||
DisableWarnings *[]string `mapstructure:"disable_warnings"`
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
Mode *string `mapstructure:"mode"`
|
||||
Name *string `mapstructure:"name"`
|
||||
ShowConfig *bool `mapstructure:"show_config"`
|
||||
ComDir *string `mapstructure:"com_dir"`
|
||||
}
|
||||
|
||||
type HTTPServer struct {
|
||||
Address *string `mapstructure:"address"`
|
||||
Port *string `mapstructure:"port"`
|
||||
SessionTTL *time.Duration `mapstructure:"session_ttl"`
|
||||
Timeout *time.Duration `mapstructure:"timeout"`
|
||||
IdleTimeout *time.Duration `mapstructure:"idle_timeout"`
|
||||
}
|
||||
|
||||
type TLS struct {
|
||||
TlsEnabled *bool `mapstructure:"enabled"`
|
||||
CertFile *string `mapstructure:"cert_file"`
|
||||
KeyFile *string `mapstructure:"key_file"`
|
||||
}
|
||||
|
||||
type Updates struct {
|
||||
UpdatesEnabled *bool `mapstructure:"enabled"`
|
||||
CheckInterval *time.Duration `mapstructure:"check_interval"`
|
||||
RepositoryURL *string `mapstructure:"repository_url"`
|
||||
WantedVersion *string `mapstructure:"wanted_version"`
|
||||
}
|
||||
|
||||
type Log struct {
|
||||
JSON *bool `mapstructure:"json_format"`
|
||||
Level *string `mapstructure:"level"`
|
||||
OutPath *string `mapstructure:"output"`
|
||||
}
|
||||
|
||||
// ConfigEnv structure for environment variables
|
||||
type Env struct {
|
||||
ConfigPath *string `mapstructure:"config_path"`
|
||||
NodePath *string `mapstructure:"node_path"`
|
||||
ParentStagePID *int `mapstructure:"parent_pid"`
|
||||
}
|
||||
|
||||
type CMDLine struct {
|
||||
Run Run
|
||||
Node Root
|
||||
}
|
||||
|
||||
type Root struct {
|
||||
Debug bool `persistent:"true" full:"debug" short:"d" def:"false" desc:"Set debug mode"`
|
||||
}
|
||||
|
||||
type Run struct {
|
||||
ConfigPath string `persistent:"true" full:"config" short:"c" def:"./config.yaml" desc:"Path to configuration file"`
|
||||
Test []int `persistent:"true" full:"test" short:"t" def:"" desc:"js test"`
|
||||
}
|
||||
@@ -2,6 +2,8 @@ package config
|
||||
|
||||
import "os"
|
||||
|
||||
// TODO: Need to make a more harmonious and understandable way of storing global variables
|
||||
|
||||
// UUIDLength is uuids length for sessions. By default it is 16 bytes.
|
||||
var UUIDLength int = 16
72
src/internal/engine/config/print.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
|
||||
)
|
||||
|
||||
func (c *Compositor) Print(v any) {
|
||||
c.printConfig(v, " ")
|
||||
}
|
||||
|
||||
func (c *Compositor) printConfig(v any, prefix string) {
|
||||
val := reflect.ValueOf(v)
|
||||
if val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
|
||||
typ := val.Type()
|
||||
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
field := val.Field(i)
|
||||
fieldType := typ.Field(i)
|
||||
|
||||
fieldName := fieldType.Name
|
||||
if tag, ok := fieldType.Tag.Lookup("mapstructure"); ok {
|
||||
if tag != "" {
|
||||
fieldName = tag
|
||||
}
|
||||
}
|
||||
|
||||
coloredFieldName := colors.SetBrightCyan(fieldName)
|
||||
|
||||
if field.Kind() == reflect.Ptr {
|
||||
if field.IsNil() {
|
||||
fmt.Printf("%s%s: %s\n", prefix, coloredFieldName, colors.SetBrightRed("<nil>"))
|
||||
continue
|
||||
}
|
||||
field = field.Elem()
|
||||
}
|
||||
|
||||
if field.Kind() == reflect.Struct {
|
||||
if field.Type() == reflect.TypeOf(time.Duration(0)) {
|
||||
duration := field.Interface().(time.Duration)
|
||||
fmt.Printf("%s%s: %s\n",
|
||||
prefix,
|
||||
coloredFieldName,
|
||||
colors.SetBrightYellow(duration.String()))
|
||||
} else {
|
||||
fmt.Printf("%s%s:\n", prefix, coloredFieldName)
|
||||
c.printConfig(field.Addr().Interface(), prefix+" ")
|
||||
}
|
||||
} else if field.Kind() == reflect.Slice {
|
||||
fmt.Printf("%s%s: %s\n",
|
||||
prefix,
|
||||
coloredFieldName,
|
||||
colors.SetBrightYellow(fmt.Sprintf("%v", field.Interface())))
|
||||
} else {
|
||||
value := field.Interface()
|
||||
valueStr := fmt.Sprintf("%v", value)
|
||||
if field.Kind() == reflect.String {
|
||||
valueStr = fmt.Sprintf("\"%s\"", value)
|
||||
}
|
||||
fmt.Printf("%s%s: %s\n",
|
||||
prefix,
|
||||
coloredFieldName,
|
||||
colors.SetBrightYellow(valueStr))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,16 +10,25 @@ import (
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
var GlobalLevel slog.Level
|
||||
|
||||
type levelsStruct struct {
|
||||
Available []string
|
||||
Fallback string
|
||||
}
|
||||
|
||||
var Levels = levelsStruct{
|
||||
Available: []string{
|
||||
"debug", "info",
|
||||
},
|
||||
Fallback: "info",
|
||||
}
|
||||
|
||||
type SlogWriter struct {
|
||||
Logger *slog.Logger
|
||||
Level slog.Level
|
||||
@@ -32,11 +41,11 @@ func (w *SlogWriter) Write(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
// SetupLogger initializes and returns a logger based on the provided environment.
|
||||
func SetupLogger(o config.Log) (*slog.Logger, error) {
|
||||
func SetupLogger(o *config.Log) (*slog.Logger, error) {
|
||||
var handlerOpts = slog.HandlerOptions{}
|
||||
var writer io.Writer = os.Stdout
|
||||
|
||||
switch o.Level {
|
||||
switch *o.Level {
|
||||
case "debug":
|
||||
GlobalLevel = slog.LevelDebug
|
||||
handlerOpts.Level = slog.LevelDebug
|
||||
@@ -48,32 +57,14 @@ func SetupLogger(o config.Log) (*slog.Logger, error) {
|
||||
handlerOpts.Level = slog.LevelInfo
|
||||
}
|
||||
|
||||
if o.OutPath != "" {
|
||||
repl := map[string]string{
|
||||
"tmp": filepath.Clean(run_manager.RuntimeDir()),
|
||||
}
|
||||
re := regexp.MustCompile(`%(\w+)%`)
|
||||
result := re.ReplaceAllStringFunc(o.OutPath, func(match string) string {
|
||||
sub := re.FindStringSubmatch(match)
|
||||
if len(sub) < 2 {
|
||||
return match
|
||||
}
|
||||
key := sub[1]
|
||||
if val, ok := repl[key]; ok {
|
||||
return val
|
||||
}
|
||||
return match
|
||||
})
|
||||
|
||||
if strings.Contains(o.OutPath, "%tmp%") {
|
||||
relPath := strings.TrimPrefix(result, filepath.Clean(run_manager.RuntimeDir()))
|
||||
if err := run_manager.SetDir(relPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
switch *o.OutPath {
|
||||
case "_1STDout":
|
||||
writer = os.Stdout
|
||||
case "_2STDerr":
|
||||
writer = os.Stderr
|
||||
default:
|
||||
logFile := &lumberjack.Logger{
|
||||
Filename: filepath.Join(result, "event.log"),
|
||||
Filename: filepath.Join(*o.OutPath, "event.log"),
|
||||
MaxSize: 10,
|
||||
MaxBackups: 5,
|
||||
MaxAge: 28,
|
||||
@@ -82,6 +73,13 @@ func SetupLogger(o config.Log) (*slog.Logger, error) {
|
||||
writer = logFile
|
||||
}
|
||||
|
||||
log := slog.New(slog.NewJSONHandler(writer, &handlerOpts))
|
||||
var handler slog.Handler
|
||||
|
||||
if *o.JSON {
|
||||
handler = slog.NewJSONHandler(writer, &handlerOpts)
|
||||
} else {
|
||||
handler = slog.NewTextHandler(writer, &handlerOpts)
|
||||
}
|
||||
log := slog.New(handler)
|
||||
return log, nil
|
||||
}
1
src/internal/engine/lua/handler.go
Normal file
@@ -0,0 +1 @@
|
||||
package lua
35
src/internal/engine/lua/pool.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package lua
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
)
|
||||
|
||||
type LuaPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
func NewLuaPool() *LuaPool {
|
||||
return &LuaPool{
|
||||
pool: sync.Pool{
|
||||
New: func() any {
|
||||
L := lua.NewState()
|
||||
|
||||
return L
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (lp *LuaPool) Get() *lua.LState {
|
||||
return lp.pool.Get().(*lua.LState)
|
||||
}
|
||||
|
||||
func (lp *LuaPool) Put(L *lua.LState) {
|
||||
L.Close()
|
||||
|
||||
newL := lua.NewState()
|
||||
|
||||
lp.pool.Put(newL)
|
||||
}
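A usage sketch for the pool above. Note the design choice in Put: it closes the used state and stocks the pool with a fresh one, so a borrowed state never carries leftover globals from a previous request, at the cost of re-allocating an interpreter each time. (Import names below are assumptions: the engine package as enginelua, gopher-lua as lua.)

	// Sketch: one Lua state per request, always returned to the pool.
	pool := enginelua.NewLuaPool()

	L := pool.Get()
	defer pool.Put(L) // closes this state and seeds a fresh one

	if err := L.DoString(`x = 1 + 1`); err != nil {
		log.Printf("lua error: %s", err)
	}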
25
src/internal/engine/lua/types.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package lua
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
|
||||
)
|
||||
|
||||
type LuaEngineDeps struct {
|
||||
HttpRequest *http.Request
|
||||
JSONRPCRequest *rpc.RPCRequest
|
||||
SessionUUID string
|
||||
ScriptPath string
|
||||
}
|
||||
|
||||
type LuaEngineContract interface {
|
||||
Handle(deps *LuaEngineDeps) *rpc.RPCResponse
|
||||
}
|
||||
|
||||
type LuaEngine struct {
|
||||
x *app.AppX
|
||||
cs *corestate.CoreState
|
||||
}
|
||||
@@ -1,11 +1,13 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/internal/server/rpc"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
|
||||
)
|
||||
|
||||
// serversApiVer is a type alias for string, used to represent API version strings in the GeneralServer.
|
||||
@@ -13,18 +15,16 @@ type serversApiVer string
|
||||
|
||||
type ServerApiContract interface {
|
||||
GetVersion() string
|
||||
Handle(w http.ResponseWriter, r *http.Request, req rpc.RPCRequest)
|
||||
Handle(ctx context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse
|
||||
}
|
||||
|
||||
// GeneralServer implements the GeneralServerApiContract and serves as a router for different API versions.
|
||||
type GatewayServer struct {
|
||||
w http.ResponseWriter
|
||||
r *http.Request
|
||||
|
||||
// servers holds the registered servers by their API version.
|
||||
// The key is the version string, and the value is the server implementing GeneralServerApi
|
||||
servers map[serversApiVer]ServerApiContract
|
||||
|
||||
log *slog.Logger
|
||||
cfg *config.Conf
|
||||
sm *session.SessionManager
|
||||
cs *corestate.CoreState
|
||||
x *app.AppX
|
||||
}
|
||||
@@ -2,23 +2,26 @@ package gateway
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
|
||||
)
|
||||
|
||||
// GeneralServerInit structure only for initialization general server.
|
||||
type GatewayServerInit struct {
|
||||
Log *slog.Logger
|
||||
Config *config.Conf
|
||||
SM *session.SessionManager
|
||||
CS *corestate.CoreState
|
||||
X *app.AppX
|
||||
}
|
||||
|
||||
// InitGeneral initializes a new GeneralServer with the provided configuration and registered servers.
|
||||
func InitGateway(o *GatewayServerInit, servers ...ServerApiContract) *GatewayServer {
|
||||
general := &GatewayServer{
|
||||
servers: make(map[serversApiVer]ServerApiContract),
|
||||
cfg: o.Config,
|
||||
log: o.Log,
|
||||
sm: o.SM,
|
||||
cs: o.CS,
|
||||
x: o.X,
|
||||
}
|
||||
|
||||
// register the provided servers
114
src/internal/server/gateway/route.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
func (gs *GatewayServer) Handle(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context() // TODO
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
sessionUUID := r.Header.Get("X-Session-UUID")
|
||||
if sessionUUID == "" {
|
||||
sessionUUID = uuid.New().String()
|
||||
|
||||
}
|
||||
gs.x.SLog.Debug("new request", slog.String("session-uuid", sessionUUID), slog.Group("connection", slog.String("ip", r.RemoteAddr)))
|
||||
|
||||
w.Header().Set("X-Session-UUID", sessionUUID)
|
||||
if !gs.sm.Add(sessionUUID) {
|
||||
gs.x.SLog.Debug("session is busy", slog.String("session-uuid", sessionUUID))
|
||||
rpc.WriteError(w, rpc.NewError(rpc.ErrSessionIsBusy, rpc.ErrSessionIsBusyS, nil, nil))
|
||||
return
|
||||
}
|
||||
defer gs.sm.Delete(sessionUUID)
|
||||
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
gs.x.SLog.Debug("failed to read body", slog.String("err", err.Error()))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
rpc.WriteError(w, rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, nil))
|
||||
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInternalErrorS))
|
||||
return
|
||||
}
|
||||
|
||||
// determine if the JSON-RPC request is a batch
|
||||
var batch []rpc.RPCRequest
|
||||
json.Unmarshal(body, &batch)
|
||||
var single rpc.RPCRequest
|
||||
if batch == nil {
|
||||
if err := json.Unmarshal(body, &single); err != nil {
|
||||
gs.x.SLog.Debug("failed to parse json", slog.String("err", err.Error()))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
rpc.WriteError(w, rpc.NewError(rpc.ErrParseError, rpc.ErrParseErrorS, nil, nil))
|
||||
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrParseErrorS))
|
||||
return
|
||||
}
|
||||
resp := gs.Route(ctx, sessionUUID, r, &single)
|
||||
if resp == nil {
|
||||
w.Write([]byte(""))
|
||||
return
|
||||
}
|
||||
rpc.WriteResponse(w, resp)
|
||||
return
|
||||
}
|
||||
|
||||
// handle batch
|
||||
responses := make(chan rpc.RPCResponse, len(batch))
|
||||
var wg sync.WaitGroup
|
||||
for _, m := range batch {
|
||||
wg.Add(1)
|
||||
go func(req rpc.RPCRequest) {
|
||||
defer wg.Done()
|
||||
res := gs.Route(ctx, sessionUUID, r, &req)
|
||||
if res != nil {
|
||||
responses <- *res
|
||||
}
|
||||
}(m)
|
||||
}
|
||||
wg.Wait()
|
||||
close(responses)
|
||||
|
||||
var result []rpc.RPCResponse
|
||||
for res := range responses {
|
||||
result = append(result, res)
|
||||
}
|
||||
if len(result) > 0 {
|
||||
json.NewEncoder(w).Encode(result)
|
||||
} else {
|
||||
w.Write([]byte("[]"))
|
||||
}
|
||||
}
|
||||
|
||||
func (gs *GatewayServer) Route(ctx context.Context, sid string, r *http.Request, req *rpc.RPCRequest) (resp *rpc.RPCResponse) {
|
||||
defer utils.CatchPanicWithFallback(func(rec any) {
|
||||
gs.x.SLog.Error("panic caught in handler", slog.Any("error", rec))
|
||||
resp = rpc.NewError(rpc.ErrInternalError, "Internal server error (panic)", nil, req.ID)
|
||||
})
|
||||
if req.JSONRPC != rpc.JSONRPCVersion {
|
||||
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInvalidRequestS), slog.String("requested-version", req.JSONRPC))
|
||||
return rpc.NewError(rpc.ErrInvalidRequest, rpc.ErrInvalidRequestS, nil, req.ID)
|
||||
}
|
||||
|
||||
server, ok := gs.servers[serversApiVer(req.ContextVersion)]
|
||||
if !ok {
|
||||
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrContextVersionS), slog.String("requested-version", req.ContextVersion))
|
||||
return rpc.NewError(rpc.ErrContextVersion, rpc.ErrContextVersionS, nil, req.ID)
|
||||
}
|
||||
|
||||
// checks if request is notification
|
||||
if req.ID == nil {
|
||||
go server.Handle(ctx, sid, r, req)
|
||||
return nil
|
||||
}
|
||||
return server.Handle(ctx, sid, r, req)
|
||||
}
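Anything registered with the gateway only has to satisfy ServerApiContract (GetVersion plus the context-aware Handle shown in the interface change above). A minimal sketch of a hypothetical versioned backend, using only the rpc helpers introduced in this changeset:

	// Sketch: a do-nothing "v3" backend that echoes the params back.
	type echoServer struct{}

	func (e *echoServer) GetVersion() string { return "v3" }

	func (e *echoServer) Handle(ctx context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse {
		return rpc.NewResponse(map[string]any{
			"echo":    req.Params,
			"session": sid,
		}, req.ID)
	}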
30
src/internal/server/rpc/definition.go
Normal file
@@ -0,0 +1,30 @@
package rpc

import "encoding/json"

type RPCRequest struct {
	JSONRPC        string           `json:"jsonrpc"`
	ID             *json.RawMessage `json:"id,omitempty"`
	Method         string           `json:"method"`
	Params         any              `json:"params,omitempty"`
	ContextVersion string           `json:"context-version,omitempty"`
}

type RPCResponse struct {
	JSONRPC string           `json:"jsonrpc"`
	ID      *json.RawMessage `json:"id"`
	Result  any              `json:"result,omitzero"`
	Error   any              `json:"error,omitzero"`
	Data    *RPCData         `json:"data,omitzero"`
}

type RPCData struct {
	ResponsibleNode string `json:"responsible-node,omitempty"`
	Salt            string `json:"salt,omitempty"`
	Checksum        string `json:"checksum-md5,omitempty"`
	NewSessionUUID  string `json:"new-session-uuid,omitempty"`
}

const (
	JSONRPCVersion = "2.0"
)
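For callers, a request against these definitions is ordinary JSON-RPC 2.0 plus the context-version field the gateway uses to pick a registered server (v1 or v2 in this changeset). A marshalling sketch; the method name and params are examples only, not taken from the diff:

	// Sketch: building and encoding a request the gateway's Route method accepts.
	id := json.RawMessage(`1`)
	req := rpc.RPCRequest{
		JSONRPC:        rpc.JSONRPCVersion, // "2.0"
		ID:             &id,
		Method:         "sys.info", // example method name
		Params:         map[string]any{"verbose": true},
		ContextVersion: "v1",
	}
	body, _ := json.Marshal(req)
	_ = body // POSTed to the node's command route with an X-Session-UUID header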
@@ -1,10 +1,5 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const (
|
||||
ErrParseError = -32700
|
||||
ErrParseErrorS = "Parse error"
|
||||
@@ -23,17 +18,13 @@ const (
|
||||
|
||||
ErrContextVersion = -32010
|
||||
ErrContextVersionS = "Invalid context version"
|
||||
|
||||
ErrInvalidMethodFormat = -32020
|
||||
ErrInvalidMethodFormatS = "Invalid method format"
|
||||
|
||||
ErrMethodIsMissing = -32020
|
||||
ErrMethodIsMissingS = "Method is missing"
|
||||
|
||||
ErrSessionIsBusy = -32030
|
||||
ErrSessionIsBusyS = "The session is busy"
|
||||
)
|
||||
|
||||
func WriteRouterError(w http.ResponseWriter, status int, e *RPCError) error {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
|
||||
data, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = w.Write(data)
|
||||
return err
|
||||
}
60
src/internal/server/rpc/responsers.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
func generateChecksum(result any) string {
|
||||
if result == nil {
|
||||
return ""
|
||||
}
|
||||
data, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%x", md5.Sum(data))
|
||||
}
|
||||
|
||||
func generateSalt() string {
|
||||
return uuid.NewString()
|
||||
}
|
||||
|
||||
func GetData(data any) *RPCData {
|
||||
return &RPCData{
|
||||
Salt: generateSalt(),
|
||||
ResponsibleNode: corestate.NODE_UUID,
|
||||
Checksum: generateChecksum(data),
|
||||
}
|
||||
}
|
||||
|
||||
func NewError(code int, message string, data any, id *json.RawMessage) *RPCResponse {
|
||||
Error := make(map[string]any)
|
||||
Error = map[string]any{
|
||||
"code": code,
|
||||
"message": message,
|
||||
}
|
||||
if data != nil {
|
||||
Error["data"] = data
|
||||
}
|
||||
|
||||
return &RPCResponse{
|
||||
JSONRPC: JSONRPCVersion,
|
||||
ID: id,
|
||||
Error: Error,
|
||||
Data: GetData(Error),
|
||||
}
|
||||
}
|
||||
|
||||
func NewResponse(result any, id *json.RawMessage) *RPCResponse {
|
||||
return &RPCResponse{
|
||||
JSONRPC: JSONRPCVersion,
|
||||
ID: id,
|
||||
Result: result,
|
||||
Data: GetData(result),
|
||||
}
|
||||
}
23
src/internal/server/rpc/writers.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func write(w http.ResponseWriter, msg *RPCResponse) error {
|
||||
data, err := json.Marshal(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
func WriteError(w http.ResponseWriter, errm *RPCResponse) error {
|
||||
return write(w, errm)
|
||||
}
|
||||
|
||||
func WriteResponse(w http.ResponseWriter, response *RPCResponse) error {
|
||||
return write(w, response)
|
||||
}
47
src/internal/server/session/manager.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package session
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SessionManagerContract interface {
|
||||
Add(uuid string) bool
|
||||
Delete(uuid string)
|
||||
StartCleanup(interval time.Duration)
|
||||
}
|
||||
|
||||
type SessionManager struct {
|
||||
sessions sync.Map
|
||||
ttl time.Duration
|
||||
}
|
||||
|
||||
func New(ttl time.Duration) *SessionManager {
|
||||
return &SessionManager{
|
||||
ttl: ttl,
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SessionManager) Add(uuid string) bool {
|
||||
_, loaded := sm.sessions.LoadOrStore(uuid, time.Now().Add(sm.ttl))
|
||||
return !loaded
|
||||
}
|
||||
|
||||
func (sm *SessionManager) Delete(uuid string) {
|
||||
sm.sessions.Delete(uuid)
|
||||
}
|
||||
|
||||
func (sm *SessionManager) StartCleanup(interval time.Duration) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(interval)
|
||||
for range ticker.C {
|
||||
sm.sessions.Range(func(key, value any) bool {
|
||||
expiry := value.(time.Time)
|
||||
if time.Now().After(expiry) {
|
||||
sm.sessions.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
}()
|
||||
}
|
||||
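Usage sketch for the manager above, mirroring how the gateway uses it (the TTL and interval values here are examples):

	// Sketch: allow only one in-flight request per session UUID.
	sm := session.New(30 * time.Minute)
	sm.StartCleanup(5 * time.Second) // background sweep of expired entries

	if !sm.Add(sessionUUID) {
		// a request with this UUID is already being processed
		return
	}
	defer sm.Delete(sessionUUID)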
415
src/internal/server/sv1/db_sqlite.go
Normal file
@@ -0,0 +1,415 @@
|
||||
package sv1
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
)
|
||||
|
||||
type DBConnection struct {
|
||||
dbPath string
|
||||
log bool
|
||||
logger *slog.Logger
|
||||
writeChan chan *dbWriteRequest
|
||||
closeChan chan struct{}
|
||||
}
|
||||
|
||||
type dbWriteRequest struct {
|
||||
query string
|
||||
args []interface{}
|
||||
resCh chan *dbWriteResult
|
||||
}
|
||||
|
||||
type dbWriteResult struct {
|
||||
rowsAffected int64
|
||||
err error
|
||||
}
|
||||
|
||||
var dbMutexMap = make(map[string]*sync.RWMutex)
|
||||
var dbGlobalMutex sync.Mutex
|
||||
|
||||
func getDBMutex(dbPath string) *sync.RWMutex {
|
||||
dbGlobalMutex.Lock()
|
||||
defer dbGlobalMutex.Unlock()
|
||||
|
||||
if mtx, ok := dbMutexMap[dbPath]; ok {
|
||||
return mtx
|
||||
}
|
||||
|
||||
mtx := &sync.RWMutex{}
|
||||
dbMutexMap[dbPath] = mtx
|
||||
return mtx
|
||||
}
|
||||
|
||||
func loadDBMod(llog *slog.Logger, sid string) func(*lua.LState) int {
|
||||
return func(L *lua.LState) int {
|
||||
llog.Debug("import module db-sqlite")
|
||||
dbMod := L.NewTable()
|
||||
|
||||
L.SetField(dbMod, "connect", L.NewFunction(func(L *lua.LState) int {
|
||||
dbPath := L.CheckString(1)
|
||||
|
||||
logQueries := false
|
||||
if L.GetTop() >= 2 {
|
||||
opts := L.CheckTable(2)
|
||||
if val := opts.RawGetString("log"); val != lua.LNil {
|
||||
logQueries = lua.LVAsBool(val)
|
||||
}
|
||||
}
|
||||
|
||||
conn := &DBConnection{
|
||||
dbPath: dbPath,
|
||||
log: logQueries,
|
||||
logger: llog,
|
||||
writeChan: make(chan *dbWriteRequest, 100),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
go conn.processWrites()
|
||||
|
||||
ud := L.NewUserData()
|
||||
ud.Value = conn
|
||||
L.SetMetatable(ud, L.GetTypeMetatable("gosally_db"))
|
||||
|
||||
L.Push(ud)
|
||||
return 1
|
||||
}))
|
||||
|
||||
mt := L.NewTypeMetatable("gosally_db")
|
||||
L.SetField(mt, "__index", L.SetFuncs(L.NewTable(), map[string]lua.LGFunction{
|
||||
"exec": dbExec,
|
||||
"query": dbQuery,
|
||||
"query_row": dbQueryRow,
|
||||
"close": dbClose,
|
||||
}))
|
||||
|
||||
L.SetField(dbMod, "__seed", lua.LString(sid))
|
||||
L.Push(dbMod)
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *DBConnection) processWrites() {
|
||||
for {
|
||||
select {
|
||||
case req := <-conn.writeChan:
|
||||
mtx := getDBMutex(conn.dbPath)
|
||||
mtx.Lock()
|
||||
|
||||
db, err := sql.Open("sqlite", conn.dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_sync=NORMAL&_cache_size=-10000")
|
||||
if err == nil {
|
||||
_, err = db.Exec("PRAGMA journal_mode=WAL;")
|
||||
if err == nil {
|
||||
res, execErr := db.Exec(req.query, req.args...)
|
||||
if execErr == nil {
|
||||
rows, _ := res.RowsAffected()
|
||||
req.resCh <- &dbWriteResult{rowsAffected: rows}
|
||||
} else {
|
||||
req.resCh <- &dbWriteResult{err: execErr}
|
||||
}
|
||||
}
|
||||
db.Close()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
req.resCh <- &dbWriteResult{err: err}
|
||||
}
|
||||
|
||||
mtx.Unlock()
|
||||
case <-conn.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func dbExec(L *lua.LState) int {
|
||||
ud := L.CheckUserData(1)
|
||||
conn, ok := ud.Value.(*DBConnection)
|
||||
if !ok {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString("invalid database connection"))
|
||||
return 2
|
||||
}
|
||||
|
||||
query := L.CheckString(2)
|
||||
|
||||
var args []any
|
||||
if L.GetTop() >= 3 {
|
||||
params := L.CheckTable(3)
|
||||
params.ForEach(func(k lua.LValue, v lua.LValue) {
|
||||
args = append(args, ConvertLuaTypesToGolang(v))
|
||||
})
|
||||
}
|
||||
|
||||
if conn.log {
|
||||
conn.logger.Info("DB Exec",
|
||||
slog.String("query", query),
|
||||
slog.Any("params", args))
|
||||
}
|
||||
|
||||
resCh := make(chan *dbWriteResult, 1)
|
||||
conn.writeChan <- &dbWriteRequest{
|
||||
query: query,
|
||||
args: args,
|
||||
resCh: resCh,
|
||||
}
|
||||
|
||||
ctx := L.NewTable()
|
||||
L.SetField(ctx, "done", lua.LBool(false))
|
||||
|
||||
var result lua.LValue = lua.LNil
|
||||
var errorMsg lua.LValue = lua.LNil
|
||||
|
||||
L.SetField(ctx, "wait", L.NewFunction(func(L *lua.LState) int {
|
||||
res := <-resCh
|
||||
L.SetField(ctx, "done", lua.LBool(true))
|
||||
|
||||
if res.err != nil {
|
||||
errorMsg = lua.LString(res.err.Error())
|
||||
result = lua.LNil
|
||||
} else {
|
||||
result = lua.LNumber(res.rowsAffected)
|
||||
errorMsg = lua.LNil
|
||||
}
|
||||
|
||||
if res.err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(res.err.Error()))
|
||||
return 2
|
||||
}
|
||||
L.Push(lua.LNumber(res.rowsAffected))
|
||||
L.Push(lua.LNil)
|
||||
return 2
|
||||
}))
|
||||
|
||||
L.SetField(ctx, "check", L.NewFunction(func(L *lua.LState) int {
|
||||
select {
|
||||
case res := <-resCh:
|
||||
L.SetField(ctx, "done", lua.LBool(true))
|
||||
if res.err != nil {
|
||||
errorMsg = lua.LString(res.err.Error())
|
||||
result = lua.LNil
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(res.err.Error()))
|
||||
return 2
|
||||
} else {
|
||||
result = lua.LNumber(res.rowsAffected)
|
||||
errorMsg = lua.LNil
|
||||
L.Push(lua.LNumber(res.rowsAffected))
|
||||
L.Push(lua.LNil)
|
||||
return 2
|
||||
}
|
||||
default:
|
||||
L.Push(result)
|
||||
L.Push(errorMsg)
|
||||
return 2
|
||||
}
|
||||
}))
|
||||
|
||||
L.Push(ctx)
|
||||
L.Push(lua.LNil)
|
||||
return 2
|
||||
}
|
||||
|
||||
func dbQueryRow(L *lua.LState) int {
|
||||
ud := L.CheckUserData(1)
|
||||
conn, ok := ud.Value.(*DBConnection)
|
||||
if !ok {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString("invalid database connection"))
|
||||
return 2
|
||||
}
|
||||
|
||||
query := L.CheckString(2)
|
||||
|
||||
var args []any
|
||||
if L.GetTop() >= 3 {
|
||||
params := L.CheckTable(3)
|
||||
params.ForEach(func(k lua.LValue, v lua.LValue) {
|
||||
args = append(args, ConvertLuaTypesToGolang(v))
|
||||
})
|
||||
}
|
||||
|
||||
if conn.log {
|
||||
conn.logger.Info("DB QueryRow",
|
||||
slog.String("query", query),
|
||||
slog.Any("params", args))
|
||||
}
|
||||
|
||||
mtx := getDBMutex(conn.dbPath)
|
||||
mtx.RLock()
|
||||
defer mtx.RUnlock()
|
||||
|
||||
db, err := sql.Open("sqlite", conn.dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_sync=NORMAL&_cache_size=-10000")
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
row := db.QueryRow(query, args...)
|
||||
|
||||
columns := []string{}
|
||||
stmt, err := db.Prepare(query)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("prepare failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
defer stmt.Close()
|
||||
rows, err := stmt.Query(args...)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("query failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
defer rows.Close()
|
||||
cols, err := rows.Columns()
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("get columns failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
for _, c := range cols {
|
||||
columns = append(columns, c)
|
||||
}
|
||||
|
||||
colCount := len(columns)
|
||||
values := make([]any, colCount)
|
||||
valuePtrs := make([]any, colCount)
|
||||
for i := range columns {
|
||||
valuePtrs[i] = &values[i]
|
||||
}
|
||||
|
||||
err = row.Scan(valuePtrs...)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
L.Push(lua.LNil)
|
||||
return 1
|
||||
}
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("scan failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
|
||||
rowTable := L.NewTable()
|
||||
for i, col := range columns {
|
||||
val := values[i]
|
||||
if val == nil {
|
||||
L.SetField(rowTable, col, lua.LNil)
|
||||
} else {
|
||||
L.SetField(rowTable, col, ConvertGolangTypesToLua(L, val))
|
||||
}
|
||||
}
|
||||
|
||||
L.Push(rowTable)
|
||||
return 1
|
||||
}
|
||||
|
||||
func dbQuery(L *lua.LState) int {
|
||||
ud := L.CheckUserData(1)
|
||||
conn, ok := ud.Value.(*DBConnection)
|
||||
if !ok {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString("invalid database connection"))
|
||||
return 2
|
||||
}
|
||||
|
||||
query := L.CheckString(2)
|
||||
|
||||
var args []any
|
||||
if L.GetTop() >= 3 {
|
||||
params := L.CheckTable(3)
|
||||
params.ForEach(func(k lua.LValue, v lua.LValue) {
|
||||
args = append(args, ConvertLuaTypesToGolang(v))
|
||||
})
|
||||
}
|
||||
|
||||
if conn.log {
|
||||
conn.logger.Info("DB Query",
|
||||
slog.String("query", query),
|
||||
slog.Any("params", args))
|
||||
}
|
||||
|
||||
mtx := getDBMutex(conn.dbPath)
|
||||
mtx.RLock()
|
||||
defer mtx.RUnlock()
|
||||
|
||||
db, err := sql.Open("sqlite", conn.dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_sync=NORMAL&_cache_size=-10000")
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
rows, err := db.Query(query, args...)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("query failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
columns, err := rows.Columns()
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("get columns failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
|
||||
result := L.NewTable()
|
||||
colCount := len(columns)
|
||||
values := make([]any, colCount)
|
||||
valuePtrs := make([]any, colCount)
|
||||
|
||||
for rows.Next() {
|
||||
for i := range columns {
|
||||
valuePtrs[i] = &values[i]
|
||||
}
|
||||
|
||||
if err := rows.Scan(valuePtrs...); err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("scan failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
|
||||
rowTable := L.NewTable()
|
||||
for i, col := range columns {
|
||||
val := values[i]
|
||||
if val == nil {
|
||||
L.SetField(rowTable, col, lua.LNil)
|
||||
} else {
|
||||
L.SetField(rowTable, col, ConvertGolangTypesToLua(L, val))
|
||||
}
|
||||
}
|
||||
result.Append(rowTable)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(fmt.Sprintf("rows iteration failed: %v", err)))
|
||||
return 2
|
||||
}
|
||||
|
||||
L.Push(result)
|
||||
return 1
|
||||
}
|
||||
|
||||
func dbClose(L *lua.LState) int {
|
||||
ud := L.CheckUserData(1)
|
||||
conn, ok := ud.Value.(*DBConnection)
|
||||
if !ok {
|
||||
L.Push(lua.LFalse)
|
||||
L.Push(lua.LString("invalid database connection"))
|
||||
return 2
|
||||
}
|
||||
|
||||
close(conn.closeChan)
|
||||
L.Push(lua.LTrue)
|
||||
return 1
|
||||
}
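loadDBMod returns a gopher-lua loader, so the handler presumably exposes it to scripts through PreloadModule; the exact wiring is outside this hunk, and the module name "db" in the sketch below is an assumption:

	// Sketch: exposing the loader to a script as require("db").
	L := lua.NewState()
	defer L.Close()

	L.PreloadModule("db", loadDBMod(llog, sid)) // "db" is an assumed module name
	if err := L.DoFile(scriptPath); err != nil {
		llog.Error("script failed", slog.String("err", err.Error()))
	}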
39
src/internal/server/sv1/handle.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package sv1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
|
||||
)
|
||||
|
||||
func (h *HandlerV1) Handle(_ context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse {
|
||||
if req.Method == "" {
|
||||
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrMethodNotFoundS), slog.String("requested-method", req.Method))
|
||||
return rpc.NewError(rpc.ErrMethodIsMissing, rpc.ErrMethodIsMissingS, nil, req.ID)
|
||||
}
|
||||
|
||||
method, err := h.resolveMethodPath(req.Method)
|
||||
if err != nil {
|
||||
if err.Error() == rpc.ErrInvalidMethodFormatS {
|
||||
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInvalidMethodFormatS), slog.String("requested-method", req.Method))
|
||||
return rpc.NewError(rpc.ErrInvalidMethodFormat, rpc.ErrInvalidMethodFormatS, nil, req.ID)
|
||||
} else if err.Error() == rpc.ErrMethodNotFoundS {
|
||||
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrMethodNotFoundS), slog.String("requested-method", req.Method))
|
||||
return rpc.NewError(rpc.ErrMethodNotFound, rpc.ErrMethodNotFoundS, nil, req.ID)
|
||||
}
|
||||
}
|
||||
switch req.Params.(type) {
|
||||
case map[string]any, []any, nil:
|
||||
return h.handleLUA(sid, r, req, method)
|
||||
default:
|
||||
// JSON-RPC 2.0 Specification:
|
||||
// https://www.jsonrpc.org/specification#parameter_structures
|
||||
//
|
||||
// "params" MUST be either an *array* or an *object* if included.
|
||||
// Any other type (e.g., a number, string, or boolean) is INVALID.
|
||||
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInvalidParamsS))
|
||||
return rpc.NewError(rpc.ErrInvalidParams, rpc.ErrInvalidParamsS, nil, req.ID)
|
||||
}
|
||||
}
86
src/internal/server/sv1/jwt.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package sv1
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
)
|
||||
|
||||
func loadJWTMod(llog *slog.Logger, sid string) func(*lua.LState) int {
|
||||
return func(L *lua.LState) int {
|
||||
llog.Debug("import module jwt")
|
||||
jwtMod := L.NewTable()
|
||||
|
||||
L.SetField(jwtMod, "encode", L.NewFunction(jwtEncode))
|
||||
L.SetField(jwtMod, "decode", L.NewFunction(jwtDecode))
|
||||
|
||||
L.SetField(jwtMod, "__seed", lua.LString(sid))
|
||||
L.Push(jwtMod)
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
func jwtEncode(L *lua.LState) int {
|
||||
payloadTbl := L.CheckTable(1)
|
||||
secret := L.GetField(payloadTbl, "secret").String()
|
||||
payload := L.GetField(payloadTbl, "payload").(*lua.LTable)
|
||||
expiresIn := L.GetField(payloadTbl, "expires_in")
|
||||
expDuration := time.Hour
|
||||
|
||||
if expiresIn.Type() == lua.LTNumber {
|
||||
floatVal := ConvertLuaTypesToGolang(expiresIn).(float64)
|
||||
expDuration = time.Duration(floatVal) * time.Second
|
||||
}
|
||||
|
||||
claims := jwt.MapClaims{}
|
||||
payload.ForEach(func(key, value lua.LValue) {
|
||||
claims[key.String()] = ConvertLuaTypesToGolang(value)
|
||||
})
|
||||
claims["iat"] = time.Now().Unix()
|
||||
claims["exp"] = time.Now().Add(expDuration).Unix()
|
||||
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
|
||||
signedToken, err := token.SignedString([]byte(secret))
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
|
||||
L.Push(lua.LString(signedToken))
|
||||
return 1
|
||||
}
|
||||
|
||||
func jwtDecode(L *lua.LState) int {
|
||||
tokenString := L.CheckString(1)
|
||||
optsTbl := L.OptTable(2, L.NewTable())
|
||||
secret := L.GetField(optsTbl, "secret").String()
|
||||
|
||||
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (any, error) {
|
||||
return []byte(secret), nil
|
||||
})
|
||||
|
||||
if err != nil || !token.Valid {
|
||||
L.Push(lua.LString("Invalid token: " + err.Error()))
|
||||
L.Push(lua.LNil)
|
||||
return 2
|
||||
}
|
||||
|
||||
claims, ok := token.Claims.(jwt.MapClaims)
|
||||
if !ok {
|
||||
L.Push(lua.LString("Invalid claims"))
|
||||
L.Push(lua.LNil)
|
||||
return 2
|
||||
}
|
||||
|
||||
luaTable := L.NewTable()
|
||||
for k, v := range claims {
|
||||
luaTable.RawSetString(k, ConvertGolangTypesToLua(L, v))
|
||||
}
|
||||
|
||||
L.Push(lua.LNil)
|
||||
L.Push(luaTable)
|
||||
return 2
|
||||
}
636
src/internal/server/sv1/lua_handler.go
Normal file
@@ -0,0 +1,636 @@
|
||||
package sv1
|
||||
|
||||
// TODO: make a lua state pool using sync.Pool
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"math/rand/v2"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
|
||||
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
func addInitiatorHeaders(sid string, req *http.Request, headers http.Header) {
|
||||
clientIP := req.RemoteAddr
|
||||
if forwardedFor := req.Header.Get("X-Forwarded-For"); forwardedFor != "" {
|
||||
clientIP = forwardedFor
|
||||
}
|
||||
headers.Set("X-Initiator-IP", clientIP)
|
||||
headers.Set("X-Session-UUID", sid)
|
||||
headers.Set("X-Initiator-Host", req.Host)
|
||||
headers.Set("X-Initiator-User-Agent", req.UserAgent())
|
||||
headers.Set("X-Initiator-Referer", req.Referer())
|
||||
}
|
||||
|
||||
// A small reminder: this code is only at the MVP stage,
|
||||
// and some parts of the code may cause shock from the
|
||||
// incompetence of the developer. But, in the end,
|
||||
// this code is just an idea. If there is a desire to
|
||||
// contribute to the development of the code,
|
||||
// I will be only glad.
|
||||
// TODO: make this huge function more harmonious by dividing responsibilities
|
||||
func (h *HandlerV1) handleLUA(sid string, r *http.Request, req *rpc.RPCRequest, path string) *rpc.RPCResponse {
|
||||
var __exit = -1
|
||||
|
||||
llog := h.x.SLog.With(slog.String("session-id", sid))
|
||||
llog.Debug("handling LUA")
|
||||
L := lua.NewState()
|
||||
defer L.Close()
|
||||
|
||||
osMod := L.GetGlobal("os").(*lua.LTable)
|
||||
L.SetField(osMod, "exit", lua.LNil)
|
||||
|
||||
ioMod := L.GetGlobal("io").(*lua.LTable)
|
||||
for _, k := range []string{"write", "output", "flush", "read", "input"} {
|
||||
ioMod.RawSetString(k, lua.LNil)
|
||||
}
|
||||
L.Env.RawSetString("print", lua.LNil)
|
||||
|
||||
for _, name := range []string{"stdout", "stderr", "stdin"} {
|
||||
stream := ioMod.RawGetString(name)
|
||||
if t, ok := stream.(*lua.LUserData); ok {
|
||||
t.Metatable = lua.LNil
|
||||
}
|
||||
}
|
||||
|
||||
seed := rand.Int()
|
||||
|
||||
loadSessionMod := func(L *lua.LState) int {
|
||||
llog.Debug("import module session", slog.String("script", path))
|
||||
sessionMod := L.NewTable()
|
||||
inTable := L.NewTable()
|
||||
paramsTable := L.NewTable()
|
||||
headersTable := L.NewTable()
|
||||
|
||||
fetchedHeadersTable := L.NewTable()
|
||||
for k, v := range r.Header {
|
||||
L.SetField(fetchedHeadersTable, k, ConvertGolangTypesToLua(L, v))
|
||||
}
|
||||
|
||||
headersGetter := L.NewFunction(func(L *lua.LState) int {
|
||||
path := L.OptString(1, "")
|
||||
def := L.Get(2)
|
||||
|
||||
get := func(path string) lua.LValue {
|
||||
if path == "" {
|
||||
return fetchedHeadersTable
|
||||
}
|
||||
fetched := r.Header.Get(path)
|
||||
if fetched == "" {
|
||||
return lua.LNil
|
||||
}
|
||||
return lua.LString(fetched)
|
||||
}
|
||||
val := get(path)
|
||||
if val == lua.LNil && def != lua.LNil {
|
||||
L.Push(def)
|
||||
} else {
|
||||
L.Push(val)
|
||||
}
|
||||
return 1
|
||||
})
|
||||
|
||||
L.SetField(headersTable, "__fetched", fetchedHeadersTable)
|
||||
|
||||
L.SetField(headersTable, "get", headersGetter)
|
||||
L.SetField(inTable, "headers", headersTable)
|
||||
|
||||
fetchedParamsTable := L.NewTable()
|
||||
switch params := req.Params.(type) {
|
||||
case map[string]any:
|
||||
for k, v := range params {
|
||||
L.SetField(fetchedParamsTable, k, ConvertGolangTypesToLua(L, v))
|
||||
}
|
||||
case []any:
|
||||
for i, v := range params {
|
||||
fetchedParamsTable.RawSetInt(i+1, ConvertGolangTypesToLua(L, v))
|
||||
}
|
||||
}
|
||||
|
||||
paramsGetter := L.NewFunction(func(L *lua.LState) int {
|
||||
path := L.OptString(1, "")
|
||||
def := L.Get(2)
|
||||
|
||||
get := func(tbl *lua.LTable, path string) lua.LValue {
|
||||
if path == "" {
|
||||
return tbl
|
||||
}
|
||||
current := tbl
|
||||
parts := strings.Split(path, ".")
|
||||
size := len(parts)
|
||||
for index, key := range parts {
|
||||
val := current.RawGetString(key)
|
||||
if tblVal, ok := val.(*lua.LTable); ok {
|
||||
current = tblVal
|
||||
} else {
|
||||
if index == size-1 {
|
||||
return val
|
||||
}
|
||||
return lua.LNil
|
||||
}
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
			paramsTbl := L.GetField(paramsTable, "__fetched")
			val := get(paramsTbl.(*lua.LTable), path)
|
||||
if val == lua.LNil && def != lua.LNil {
|
||||
L.Push(def)
|
||||
} else {
|
||||
L.Push(val)
|
||||
}
|
||||
return 1
|
||||
})
|
||||
L.SetField(paramsTable, "__fetched", fetchedParamsTable)
|
||||
|
||||
L.SetField(paramsTable, "get", paramsGetter)
|
||||
L.SetField(inTable, "params", paramsTable)
|
||||
|
||||
outTable := L.NewTable()
|
||||
scriptDataTable := L.NewTable()
|
||||
L.SetField(outTable, "__script_data", scriptDataTable)
|
||||
|
||||
L.SetField(inTable, "address", lua.LString(r.RemoteAddr))
|
||||
|
||||
L.SetField(sessionMod, "throw_error", L.NewFunction(func(L *lua.LState) int {
|
||||
arg := L.Get(1)
|
||||
var msg string
|
||||
switch arg.Type() {
|
||||
case lua.LTString:
|
||||
msg = arg.String()
|
||||
case lua.LTNumber:
|
||||
msg = strconv.FormatFloat(float64(arg.(lua.LNumber)), 'f', -1, 64)
|
||||
default:
|
||||
L.ArgError(1, "expected string or number")
|
||||
return 0
|
||||
}
|
||||
|
||||
L.RaiseError("%s", msg)
|
||||
return 0
|
||||
}))
|
||||
|
||||
resTable := L.NewTable()
|
||||
L.SetField(scriptDataTable, "result", resTable)
|
||||
L.SetField(outTable, "send", L.NewFunction(func(L *lua.LState) int {
|
||||
res := L.Get(1)
|
||||
|
||||
resFTable := scriptDataTable.RawGetString("result")
|
||||
if resPTable, ok := res.(*lua.LTable); ok {
|
||||
resPTable.ForEach(func(key, value lua.LValue) {
|
||||
L.SetField(resFTable, key.String(), value)
|
||||
})
|
||||
} else {
|
||||
L.SetField(scriptDataTable, "result", res)
|
||||
}
|
||||
|
||||
__exit = 0
|
||||
L.RaiseError("__successfull")
|
||||
return 0
|
||||
}))
|
||||
|
||||
L.SetField(outTable, "set", L.NewFunction(func(L *lua.LState) int {
|
||||
res := L.Get(1)
|
||||
if res == lua.LNil {
|
||||
return 0
|
||||
}
|
||||
|
||||
resFTable := scriptDataTable.RawGetString("result")
|
||||
if resPTable, ok := res.(*lua.LTable); ok {
|
||||
resPTable.ForEach(func(key, value lua.LValue) {
|
||||
L.SetField(resFTable, key.String(), value)
|
||||
})
|
||||
} else {
|
||||
L.SetField(scriptDataTable, "result", res)
|
||||
}
|
||||
return 0
|
||||
}))
|
||||
|
||||
errTable := L.NewTable()
|
||||
L.SetField(scriptDataTable, "error", errTable)
|
||||
L.SetField(outTable, "send_error", L.NewFunction(func(L *lua.LState) int {
|
||||
var params [3]lua.LValue
|
||||
for i := range 3 {
|
||||
params[i] = L.Get(i + 1)
|
||||
}
|
||||
if errTable, ok := scriptDataTable.RawGetString("error").(*lua.LTable); ok {
|
||||
for _, v := range params {
|
||||
switch v.Type() {
|
||||
case lua.LTNumber:
|
||||
if n, ok := v.(lua.LNumber); ok {
|
||||
L.SetField(errTable, "code", n)
|
||||
}
|
||||
case lua.LTString:
|
||||
if s, ok := v.(lua.LString); ok {
|
||||
L.SetField(errTable, "message", s)
|
||||
}
|
||||
case lua.LTTable:
|
||||
if tbl, ok := v.(*lua.LTable); ok {
|
||||
L.SetField(errTable, "data", tbl)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__exit = 1
|
||||
L.RaiseError("__unsuccessfull")
|
||||
return 0
|
||||
}))
|
||||
|
||||
L.SetField(outTable, "set_error", L.NewFunction(func(L *lua.LState) int {
|
||||
var params [3]lua.LValue
|
||||
for i := range 3 {
|
||||
params[i] = L.Get(i + 1)
|
||||
}
|
||||
if errTable, ok := scriptDataTable.RawGetString("error").(*lua.LTable); ok {
|
||||
for _, v := range params {
|
||||
switch v.Type() {
|
||||
case lua.LTNumber:
|
||||
if n, ok := v.(lua.LNumber); ok {
|
||||
L.SetField(errTable, "code", n)
|
||||
}
|
||||
case lua.LTString:
|
||||
if s, ok := v.(lua.LString); ok {
|
||||
L.SetField(errTable, "message", s)
|
||||
}
|
||||
case lua.LTTable:
|
||||
if tbl, ok := v.(*lua.LTable); ok {
|
||||
L.SetField(errTable, "data", tbl)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}))
|
||||
|
||||
L.SetField(sessionMod, "request", inTable)
|
||||
L.SetField(sessionMod, "response", outTable)
|
||||
|
||||
L.SetField(sessionMod, "id", lua.LString(sid))
|
||||
|
||||
L.SetField(sessionMod, "__seed", lua.LString(fmt.Sprint(seed)))
|
||||
L.Push(sessionMod)
|
||||
return 1
|
||||
}
|
||||
|
||||
loadLogMod := func(L *lua.LState) int {
|
||||
llog.Debug("import module log", slog.String("script", path))
|
||||
logMod := L.NewTable()
|
||||
|
||||
logFuncs := map[string]func(string, ...any){
|
||||
"info": llog.Info,
|
||||
"debug": llog.Debug,
|
||||
"error": llog.Error,
|
||||
"warn": llog.Warn,
|
||||
}
|
||||
|
||||
for name, logFunc := range logFuncs {
|
||||
fun := logFunc
|
||||
L.SetField(logMod, name, L.NewFunction(func(L *lua.LState) int {
|
||||
msg := L.Get(1)
|
||||
converted := ConvertLuaTypesToGolang(msg)
|
||||
fun(fmt.Sprintf("the script says: %s", converted), slog.String("script", path))
|
||||
return 0
|
||||
}))
|
||||
}
|
||||
|
||||
for _, fn := range []struct {
|
||||
field string
|
||||
pfunc func(string, ...any)
|
||||
color func() string
|
||||
}{
|
||||
{"event", h.x.Log.Printf, nil},
|
||||
{"event_error", h.x.Log.Printf, colors.PrintError},
|
||||
{"event_warn", h.x.Log.Printf, colors.PrintWarn},
|
||||
} {
|
||||
L.SetField(logMod, fn.field, L.NewFunction(func(L *lua.LState) int {
|
||||
msg := L.Get(1)
|
||||
converted := ConvertLuaTypesToGolang(msg)
|
||||
if fn.color != nil {
|
||||
h.x.Log.Printf("%s: %s: %s", fn.color(), path, converted)
|
||||
} else {
|
||||
h.x.Log.Printf("%s: %s", path, converted)
|
||||
}
|
||||
return 0
|
||||
}))
|
||||
}
|
||||
|
||||
L.SetField(logMod, "__seed", lua.LString(fmt.Sprint(seed)))
|
||||
L.Push(logMod)
|
||||
return 1
|
||||
}
|
||||
|
||||
loadNetMod := func(L *lua.LState) int {
|
||||
llog.Debug("import module net", slog.String("script", path))
|
||||
netMod := L.NewTable()
|
||||
netModhttp := L.NewTable()
|
||||
|
||||
L.SetField(netModhttp, "get_request", L.NewFunction(func(L *lua.LState) int {
|
||||
logRequest := L.ToBool(1)
|
||||
url := L.ToString(2)
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
|
||||
addInitiatorHeaders(sid, r, req.Header)
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
|
||||
if logRequest {
|
||||
llog.Info("HTTP GET request",
|
||||
slog.String("script", path),
|
||||
slog.String("url", url),
|
||||
slog.Int("status", resp.StatusCode),
|
||||
slog.String("status_text", resp.Status),
|
||||
slog.String("initiator_ip", req.Header.Get("X-Initiator-IP")),
|
||||
)
|
||||
}
|
||||
|
||||
result := L.NewTable()
|
||||
L.SetField(result, "status", lua.LNumber(resp.StatusCode))
|
||||
L.SetField(result, "status_text", lua.LString(resp.Status))
|
||||
L.SetField(result, "body", lua.LString(body))
|
||||
L.SetField(result, "content_length", lua.LNumber(resp.ContentLength))
|
||||
|
||||
headers := L.NewTable()
|
||||
for k, v := range resp.Header {
|
||||
L.SetField(headers, k, ConvertGolangTypesToLua(L, v))
|
||||
}
|
||||
L.SetField(result, "headers", headers)
|
||||
|
||||
L.Push(result)
|
||||
return 1
|
||||
}))
|
||||
|
||||
L.SetField(netModhttp, "post_request", L.NewFunction(func(L *lua.LState) int {
|
||||
logRequest := L.ToBool(1)
|
||||
url := L.ToString(2)
|
||||
contentType := L.ToString(3)
|
||||
payload := L.ToString(4)
|
||||
|
||||
body := strings.NewReader(payload)
|
||||
|
||||
req, err := http.NewRequest("POST", url, body)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
|
||||
addInitiatorHeaders(sid, r, req.Header)
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
|
||||
if logRequest {
|
||||
llog.Info("HTTP POST request",
|
||||
slog.String("script", path),
|
||||
slog.String("url", url),
|
||||
slog.String("content_type", contentType),
|
||||
slog.Int("status", resp.StatusCode),
|
||||
slog.String("status_text", resp.Status),
|
||||
slog.String("initiator_ip", req.Header.Get("X-Initiator-IP")),
|
||||
)
|
||||
}
|
||||
|
||||
result := L.NewTable()
|
||||
L.SetField(result, "status", lua.LNumber(resp.StatusCode))
|
||||
L.SetField(result, "status_text", lua.LString(resp.Status))
|
||||
L.SetField(result, "body", lua.LString(respBody))
|
||||
L.SetField(result, "content_length", lua.LNumber(resp.ContentLength))
|
||||
|
||||
headers := L.NewTable()
|
||||
for k, v := range resp.Header {
|
||||
L.SetField(headers, k, ConvertGolangTypesToLua(L, v))
|
||||
}
|
||||
L.SetField(result, "headers", headers)
|
||||
|
||||
L.Push(result)
|
||||
return 1
|
||||
}))
|
||||
|
||||
L.SetField(netMod, "http", netModhttp)
|
||||
|
||||
L.SetField(netMod, "__seed", lua.LString(fmt.Sprint(seed)))
|
||||
L.Push(netMod)
|
||||
return 1
|
||||
}
|
||||
|
||||
loadCryptbcryptMod := func(L *lua.LState) int {
|
||||
llog.Debug("import module crypt.bcrypt", slog.String("script", path))
|
||||
bcryptMod := L.NewTable()
|
||||
|
||||
L.SetField(bcryptMod, "MinCost", lua.LNumber(bcrypt.MinCost))
|
||||
L.SetField(bcryptMod, "MaxCost", lua.LNumber(bcrypt.MaxCost))
|
||||
L.SetField(bcryptMod, "DefaultCost", lua.LNumber(bcrypt.DefaultCost))
|
||||
|
||||
L.SetField(bcryptMod, "generate", L.NewFunction(func(l *lua.LState) int {
|
||||
password := ConvertLuaTypesToGolang(L.Get(1))
|
||||
passwordStr, ok := password.(string)
|
||||
if !ok {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString("error: password must be a string"))
|
||||
return 2
|
||||
}
|
||||
|
||||
cost := ConvertLuaTypesToGolang(L.Get(2))
|
||||
costInt := bcrypt.DefaultCost
|
||||
switch v := cost.(type) {
|
||||
case int:
|
||||
costInt = v
|
||||
case float64:
|
||||
costInt = int(v)
|
||||
case nil:
|
||||
// ok, use DefaultCost
|
||||
default:
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString("error: cost must be an integer"))
|
||||
return 2
|
||||
}
|
||||
|
||||
hashBytes, err := bcrypt.GenerateFromPassword([]byte(passwordStr), costInt)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString("error: " + err.Error()))
|
||||
return 2
|
||||
}
|
||||
|
||||
L.Push(lua.LString(string(hashBytes)))
|
||||
L.Push(lua.LNil)
|
||||
return 2
|
||||
}))
|
||||
|
||||
L.SetField(bcryptMod, "compare", L.NewFunction(func(l *lua.LState) int {
|
||||
hash := ConvertLuaTypesToGolang(L.Get(1))
|
||||
hashStr, ok := hash.(string)
|
||||
if !ok {
|
||||
L.Push(lua.LString("error: hash must be a string"))
|
||||
return 1
|
||||
}
|
||||
password := ConvertLuaTypesToGolang(L.Get(2))
|
||||
passwordStr, ok := password.(string)
|
||||
if !ok {
|
||||
L.Push(lua.LString("error: password must be a string"))
|
||||
return 1
|
||||
}
|
||||
|
||||
err := bcrypt.CompareHashAndPassword([]byte(hashStr), []byte(passwordStr))
|
||||
if err != nil {
|
||||
L.Push(lua.LFalse)
|
||||
return 1
|
||||
}
|
||||
L.Push(lua.LTrue)
|
||||
return 1
|
||||
}))
|
||||
|
||||
L.SetField(bcryptMod, "__seed", lua.LString(fmt.Sprint(seed)))
|
||||
L.Push(bcryptMod)
|
||||
return 1
|
||||
}
|
||||
|
||||
	loadCryptbsha256Mod := func(L *lua.LState) int {
		llog.Debug("import module crypt.sha256", slog.String("script", path))
		sha256Mod := L.NewTable()

		L.SetField(sha256Mod, "hash", L.NewFunction(func(l *lua.LState) int {
			data := ConvertLuaTypesToGolang(L.Get(1))
			var dataStr = fmt.Sprint(data)

			hash := sha256.Sum256([]byte(dataStr))

			L.Push(lua.LString(hex.EncodeToString(hash[:])))
			L.Push(lua.LNil)
			return 2
		}))

		L.SetField(sha256Mod, "__seed", lua.LString(fmt.Sprint(seed)))
		L.Push(sha256Mod)
		return 1
	}

L.PreloadModule("internal.session", loadSessionMod)
|
||||
L.PreloadModule("internal.log", loadLogMod)
|
||||
L.PreloadModule("internal.net", loadNetMod)
|
||||
L.PreloadModule("internal.database.sqlite", loadDBMod(llog, fmt.Sprint(seed)))
|
||||
L.PreloadModule("internal.crypt.bcrypt", loadCryptbcryptMod)
|
||||
L.PreloadModule("internal.crypt.sha256", loadCryptbsha256Mod)
|
||||
L.PreloadModule("internal.crypt.jwt", loadJWTMod(llog, fmt.Sprint(seed)))
|
||||
|
||||
llog.Debug("preparing environment")
|
||||
prep := filepath.Join(*h.x.Config.Conf.Node.ComDir, "_prepare.lua")
|
||||
if _, err := os.Stat(prep); err == nil {
|
||||
if err := L.DoFile(prep); err != nil {
|
||||
llog.Error("script error", slog.String("script", path), slog.String("error", err.Error()))
|
||||
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
|
||||
}
|
||||
}
|
||||
llog.Debug("executing script", slog.String("script", path))
|
||||
err := L.DoFile(path)
|
||||
if err != nil && __exit != 0 && __exit != 1 {
|
||||
llog.Error("script error", slog.String("script", path), slog.String("error", err.Error()))
|
||||
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
|
||||
}
|
||||
|
||||
pkg := L.GetGlobal("package")
|
||||
pkgTbl, ok := pkg.(*lua.LTable)
|
||||
if !ok {
|
||||
llog.Error("script error", slog.String("script", path), slog.String("error", "package not found"))
|
||||
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
|
||||
}
|
||||
|
||||
loaded := pkgTbl.RawGetString("loaded")
|
||||
loadedTbl, ok := loaded.(*lua.LTable)
|
||||
if !ok {
|
||||
llog.Error("script error", slog.String("script", path), slog.String("error", "package.loaded not found"))
|
||||
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
|
||||
}
|
||||
|
||||
sessionVal := loadedTbl.RawGetString("internal.session")
|
||||
sessionTbl, ok := sessionVal.(*lua.LTable)
|
||||
if !ok {
|
||||
return rpc.NewResponse(nil, req.ID)
|
||||
}
|
||||
|
||||
tag := sessionTbl.RawGetString("__seed")
|
||||
if tag.Type() != lua.LTString || tag.String() != fmt.Sprint(seed) {
|
||||
llog.Debug("stock session module is not imported: wrong seed", slog.String("script", path))
|
||||
return rpc.NewResponse(nil, req.ID)
|
||||
}
|
||||
|
||||
outVal := sessionTbl.RawGetString("response")
|
||||
outTbl, ok := outVal.(*lua.LTable)
|
||||
if !ok {
|
||||
llog.Error("script error", slog.String("script", path), slog.String("error", "response is not a table"))
|
||||
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
|
||||
}
|
||||
|
||||
if scriptDataTable, ok := outTbl.RawGetString("__script_data").(*lua.LTable); ok {
|
||||
switch __exit {
|
||||
case 1:
|
||||
if errTbl, ok := scriptDataTable.RawGetString("error").(*lua.LTable); ok {
|
||||
llog.Debug("catch error table", slog.String("script", path))
|
||||
code := rpc.ErrInternalError
|
||||
message := rpc.ErrInternalErrorS
|
||||
if c := errTbl.RawGetString("code"); c.Type() == lua.LTNumber {
|
||||
code = int(c.(lua.LNumber))
|
||||
}
|
||||
if msg := errTbl.RawGetString("message"); msg.Type() == lua.LTString {
|
||||
message = msg.String()
|
||||
}
|
||||
data := ConvertLuaTypesToGolang(errTbl.RawGetString("data"))
|
||||
llog.Error("the script terminated with an error", slog.Int("code", code), slog.String("message", message), slog.Any("data", data))
|
||||
return rpc.NewError(code, message, data, req.ID)
|
||||
}
|
||||
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
|
||||
case 0:
|
||||
resVal := ConvertLuaTypesToGolang(scriptDataTable.RawGetString("result"))
|
||||
return rpc.NewResponse(resVal, req.ID)
|
||||
}
|
||||
}
|
||||
return rpc.NewResponse(nil, req.ID)
|
||||
}
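
Taken together, the modules preloaded above define the API surface a command script sees. Purely as a hedged illustration (the file name, method name, parameter names, and error codes below are made up; only the module names and functions come from the loaders above), a script resolved from the configured ComDir might look like this:

-- Hypothetical command script <ComDir>/demo/hash.lua, i.e. RPC method "demo.hash".
local session = require("internal.session")
local log     = require("internal.log")
local bcrypt  = require("internal.crypt.bcrypt")
local sha256  = require("internal.crypt.sha256")

-- Read a request parameter; the second argument is a default when the key is absent.
local secret = session.request.params.get("secret")
if secret == nil then
    -- send_error accepts a number (code), a string (message) and a table (data) in any order.
    session.response.send_error(-32602, "missing parameter: secret")
end

local ua = session.request.headers.get("User-Agent", "unknown")
log.info("hashing a secret for " .. ua)

local digest = sha256.hash(secret)                            -- returns hex string, nil
local hash, err = bcrypt.generate(secret, bcrypt.DefaultCost) -- returns hash, nil or nil, err
if err ~= nil then
    session.response.send_error(500, err)
end

-- send() merges the table into the result and ends the script successfully.
session.response.send({
    session = session.id,
    sha256  = digest,
    bcrypt  = hash,
})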
|
||||
126  src/internal/server/sv1/lua_types.go  Normal file
@@ -0,0 +1,126 @@
package sv1

import (
	"fmt"
	"reflect"
	"strconv"

	lua "github.com/yuin/gopher-lua"
)

func ConvertLuaTypesToGolang(value lua.LValue) any {
	switch value.Type() {
	case lua.LTString:
		return value.String()
	case lua.LTNumber:
		return float64(value.(lua.LNumber))
	case lua.LTBool:
		return bool(value.(lua.LBool))
	case lua.LTTable:
		tbl := value.(*lua.LTable)

		maxIdx := 0
		isArray := true

		var isNumeric = false
		tbl.ForEach(func(key, _ lua.LValue) {
			var numKey lua.LValue
			var ok bool
			switch key.Type() {
			case lua.LTString:
				numKey, ok = key.(lua.LString)
				if !ok {
					isArray = false
					return
				}
			case lua.LTNumber:
				numKey, ok = key.(lua.LNumber)
				if !ok {
					isArray = false
					return
				}
				isNumeric = true
			}

			num, err := strconv.Atoi(numKey.String())
			if err != nil {
				isArray = false
				return
			}
			if num < 1 {
				isArray = false
				return
			}
			if num > maxIdx {
				maxIdx = num
			}
		})

		if isArray {
			arr := make([]any, maxIdx)
			if isNumeric {
				for i := 1; i <= maxIdx; i++ {
					arr[i-1] = ConvertLuaTypesToGolang(tbl.RawGetInt(i))
				}
			} else {
				for i := 1; i <= maxIdx; i++ {
					arr[i-1] = ConvertLuaTypesToGolang(tbl.RawGetString(strconv.Itoa(i)))
				}
			}
			return arr
		}
		result := make(map[string]any)
		tbl.ForEach(func(key, val lua.LValue) {
			result[key.String()] = ConvertLuaTypesToGolang(val)
		})
		return result

	case lua.LTNil:
		return nil
	default:
		return value.String()
	}
}

func ConvertGolangTypesToLua(L *lua.LState, val any) lua.LValue {
	if val == nil {
		return lua.LNil
	}

	rv := reflect.ValueOf(val)
	rt := rv.Type()

	switch rt.Kind() {
	case reflect.String:
		return lua.LString(rv.String())
	case reflect.Bool:
		return lua.LBool(rv.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return lua.LNumber(rv.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return lua.LNumber(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return lua.LNumber(rv.Float())

	case reflect.Slice, reflect.Array:
		tbl := L.NewTable()
		for i := 0; i < rv.Len(); i++ {
			tbl.RawSetInt(i+1, ConvertGolangTypesToLua(L, rv.Index(i).Interface()))
		}
		return tbl

	case reflect.Map:
		if rt.Key().Kind() == reflect.String {
			tbl := L.NewTable()
			for _, key := range rv.MapKeys() {
				val := rv.MapIndex(key)
				tbl.RawSetString(key.String(), ConvertGolangTypesToLua(L, val.Interface()))
			}
			return tbl
		}

	default:
		return lua.LString(fmt.Sprintf("%v", val))
	}
	return lua.LString(fmt.Sprintf("%v", val))
}
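
As a hedged illustration of the conversion rules above: a Lua table with contiguous 1-based integer keys becomes a Go []any, a table with string keys becomes a map[string]any, and scalars map to string, float64, and bool. A script result such as the sketch below (field names are made up) would therefore reach rpc.NewResponse as a map containing a slice and a nested map:

-- hypothetical result passed through ConvertLuaTypesToGolang
session.response.send({
    tags  = { "a", "b", "c" },          -- 1-based array -> []any{"a", "b", "c"}
    stats = { hits = 3, ratio = 0.5 },  -- string keys   -> map[string]any{"hits": 3.0, "ratio": 0.5}
    ok    = true,                       -- boolean       -> bool
})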
28  src/internal/server/sv1/path.go  Normal file
@@ -0,0 +1,28 @@
package sv1

import (
	"errors"
	"os"
	"path/filepath"
	"strings"

	"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)

var RPCMethodSeparator = "."

func (h *HandlerV1) resolveMethodPath(method string) (string, error) {
	if !h.allowedCmd.MatchString(method) {
		return "", errors.New(rpc.ErrInvalidMethodFormatS)
	}

	parts := strings.Split(method, RPCMethodSeparator)
	relPath := filepath.Join(parts...) + ".lua"
	fullPath := filepath.Join(*h.x.Config.Conf.Node.ComDir, relPath)

	if _, err := os.Stat(fullPath); os.IsNotExist(err) {
		return "", errors.New(rpc.ErrMethodNotFoundS)
	}

	return fullPath, nil
}
@@ -3,30 +3,29 @@
package sv1

import (
	"log/slog"
	"regexp"

	"github.com/akyaiy/GoSally-mvp/internal/engine/config"
	"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
	"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
)

var SV1Version = "v1"

// HandlerV1InitStruct structure is only for initialization
type HandlerV1InitStruct struct {
	Ver            string
	Log            slog.Logger
	Config         *config.Conf
	AllowedCmd     *regexp.Regexp
	ListAllowedCmd *regexp.Regexp
	Ver        string
	CS         *corestate.CoreState
	X          *app.AppX
	AllowedCmd *regexp.Regexp
}

// HandlerV1 implements the ServerV1UtilsContract and serves as the main handler for API requests.
type HandlerV1 struct {
	log *slog.Logger

	cfg *config.Conf
	cs *corestate.CoreState
	x  *app.AppX

	// allowedCmd and listAllowedCmd are regular expressions used to validate command names.
	allowedCmd     *regexp.Regexp
	listAllowedCmd *regexp.Regexp
	allowedCmd *regexp.Regexp

	ver string
}
@@ -36,11 +35,10 @@ type HandlerV1 struct {
// because there is no validation of parameters in this function.
func InitV1Server(o *HandlerV1InitStruct) *HandlerV1 {
	return &HandlerV1{
		log:            &o.Log,
		cfg:            o.Config,
		allowedCmd:     o.AllowedCmd,
		listAllowedCmd: o.ListAllowedCmd,
		ver:            o.Ver,
		cs:         o.CS,
		x:          o.X,
		allowedCmd: o.AllowedCmd,
		ver:        o.Ver,
	}
}

12  src/internal/server/sv2/handle.go  Normal file
@@ -0,0 +1,12 @@
package sv2

import (
	"context"
	"net/http"

	"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)

func (h *Handler) Handle(_ context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse {
	return nil
}
43  src/internal/server/sv2/server.go  Normal file
@@ -0,0 +1,43 @@
// SV2 works with binaries, scripts, and anything else that has access to stdin/stdout.
// Modules run in a separate process and communicate via I/O.
package sv2

import (
	"regexp"

	"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
	"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
)

// HandlerInitStruct structure is only for initialization
type HandlerInitStruct struct {
	Ver        string
	CS         *corestate.CoreState
	X          *app.AppX
	AllowedCmd *regexp.Regexp
}

type Handler struct {
	cs *corestate.CoreState
	x  *app.AppX

	// allowedCmd is a regular expression used to validate command names.
	allowedCmd *regexp.Regexp

	ver string
}

func InitServer(o *HandlerInitStruct) *Handler {
	return &Handler{
		cs:         o.CS,
		x:          o.X,
		allowedCmd: o.AllowedCmd,
		ver:        o.Ver,
	}
}

// GetVersion returns the API version of the Handler, which is set during initialization.
// This version is used to identify the API version in request routing.
func (h *Handler) GetVersion() string {
	return h.ver
}
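
The SV2 wire protocol is not implemented in this diff (Handle still returns nil), so the following is a purely hypothetical sketch of the stdin/stdout model described in the package comment: a module reads a request from its standard input and writes its reply to standard output. The line-based framing here is an assumption, not part of the source.

-- purely hypothetical SV2-style module: echoes one request line back on stdout
local line = io.read("*l")                   -- read a single request line from stdin
io.write("echo: " .. (line or "") .. "\n")   -- reply on stdout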
11  src/main.go  Normal file
@@ -0,0 +1,11 @@
// Package main is used only for calling cmd.Execute().
package main

import (
	"github.com/akyaiy/GoSally-mvp/src/cmd"
	_ "modernc.org/sqlite"
)

func main() {
	cmd.Execute()
}
14  vendor/github.com/fsnotify/fsnotify/.cirrus.yml  generated  vendored
@@ -1,14 +0,0 @@
|
||||
freebsd_task:
|
||||
name: 'FreeBSD'
|
||||
freebsd_instance:
|
||||
image_family: freebsd-14-2
|
||||
install_script:
|
||||
- pkg update -f
|
||||
- pkg install -y go
|
||||
test_script:
|
||||
# run tests as user "cirrus" instead of root
|
||||
- pw useradd cirrus -m
|
||||
- chown -R cirrus:cirrus .
|
||||
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
|
||||
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
|
||||
- FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
|
||||
10  vendor/github.com/fsnotify/fsnotify/.gitignore  generated  vendored
@@ -1,10 +0,0 @@
|
||||
# go test -c output
|
||||
*.test
|
||||
*.test.exe
|
||||
|
||||
# Output of go build ./cmd/fsnotify
|
||||
/fsnotify
|
||||
/fsnotify.exe
|
||||
|
||||
/test/kqueue
|
||||
/test/a.out
|
||||
2  vendor/github.com/fsnotify/fsnotify/.mailmap  generated  vendored
@@ -1,2 +0,0 @@
|
||||
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
|
||||
602  vendor/github.com/fsnotify/fsnotify/CHANGELOG.md  generated  vendored
@@ -1,602 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
1.9.0 2024-04-04
|
||||
----------------
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- all: make BufferedWatcher buffered again ([#657])
|
||||
|
||||
- inotify: fix race when adding/removing watches while a watched path is being
|
||||
deleted ([#678], [#686])
|
||||
|
||||
- inotify: don't send empty event if a watched path is unmounted ([#655])
|
||||
|
||||
- inotify: don't register duplicate watches when watching both a symlink and its
|
||||
target; previously that would get "half-added" and removing the second would
|
||||
panic ([#679])
|
||||
|
||||
- kqueue: fix watching relative symlinks ([#681])
|
||||
|
||||
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
|
||||
kqueue ([#682])
|
||||
|
||||
- illumos: don't send error if changed file is deleted while processing the
|
||||
event ([#678])
|
||||
|
||||
|
||||
[#657]: https://github.com/fsnotify/fsnotify/pull/657
|
||||
[#678]: https://github.com/fsnotify/fsnotify/pull/678
|
||||
[#686]: https://github.com/fsnotify/fsnotify/pull/686
|
||||
[#655]: https://github.com/fsnotify/fsnotify/pull/655
|
||||
[#681]: https://github.com/fsnotify/fsnotify/pull/681
|
||||
[#679]: https://github.com/fsnotify/fsnotify/pull/679
|
||||
[#682]: https://github.com/fsnotify/fsnotify/pull/682
|
||||
|
||||
1.8.0 2024-10-31
|
||||
----------------
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
|
||||
|
||||
- kqueue: ignore events with Ident=0 ([#590])
|
||||
|
||||
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
|
||||
|
||||
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
|
||||
|
||||
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
|
||||
|
||||
- inotify: fix panic when calling Remove() in a goroutine ([#650])
|
||||
|
||||
- fen: allow watching subdirectories of watched directories ([#621])
|
||||
|
||||
[#590]: https://github.com/fsnotify/fsnotify/pull/590
|
||||
[#610]: https://github.com/fsnotify/fsnotify/pull/610
|
||||
[#617]: https://github.com/fsnotify/fsnotify/pull/617
|
||||
[#619]: https://github.com/fsnotify/fsnotify/pull/619
|
||||
[#620]: https://github.com/fsnotify/fsnotify/pull/620
|
||||
[#621]: https://github.com/fsnotify/fsnotify/pull/621
|
||||
[#625]: https://github.com/fsnotify/fsnotify/pull/625
|
||||
[#650]: https://github.com/fsnotify/fsnotify/pull/650
|
||||
|
||||
1.7.0 - 2023-10-22
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.17.
|
||||
|
||||
### Additions
|
||||
|
||||
- illumos: add FEN backend to support illumos and Solaris. ([#371])
|
||||
|
||||
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
|
||||
in cases where you can't control the kernel buffer and receive a large number
|
||||
of events in bursts. ([#550], [#572])
|
||||
|
||||
- all: add `AddWith()`, which is identical to `Add()` but allows passing
|
||||
options. ([#521])
|
||||
|
||||
- windows: allow setting the ReadDirectoryChangesW() buffer size with
|
||||
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
|
||||
works on all platforms and is enough for most purposes, but in some cases a
|
||||
highest buffer is needed. ([#521])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: remove watcher if a watched path is renamed ([#518])
|
||||
|
||||
After a rename the reported name wasn't updated, or even an empty string.
|
||||
Inotify doesn't provide any good facilities to update it, so just remove the
|
||||
watcher. This is already how it worked on kqueue and FEN.
|
||||
|
||||
On Windows this does work, and remains working.
|
||||
|
||||
- windows: don't listen for file attribute changes ([#520])
|
||||
|
||||
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
|
||||
with no way to see if they're a file write or attribute change, so would show
|
||||
up as a fsnotify.Write event. This is never useful, and could result in many
|
||||
spurious Write events.
|
||||
|
||||
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
|
||||
|
||||
Before it would merely return "short read", making it hard to detect this
|
||||
error.
|
||||
|
||||
- kqueue: make sure events for all files are delivered properly when removing a
|
||||
watched directory ([#526])
|
||||
|
||||
Previously they would get sent with `""` (empty string) or `"."` as the path
|
||||
name.
|
||||
|
||||
- kqueue: don't emit spurious Create events for symbolic links ([#524])
|
||||
|
||||
The link would get resolved but kqueue would "forget" it already saw the link
|
||||
itself, resulting on a Create for every Write event for the directory.
|
||||
|
||||
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
|
||||
|
||||
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
|
||||
`backend_other.go`, making it easier to use on unsupported platforms such as
|
||||
WASM, AIX, etc. ([#528])
|
||||
|
||||
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
|
||||
Google AppEngine forbids usage of the unsafe package so the inotify backend
|
||||
won't compile there.
|
||||
|
||||
[#371]: https://github.com/fsnotify/fsnotify/pull/371
|
||||
[#516]: https://github.com/fsnotify/fsnotify/pull/516
|
||||
[#518]: https://github.com/fsnotify/fsnotify/pull/518
|
||||
[#520]: https://github.com/fsnotify/fsnotify/pull/520
|
||||
[#521]: https://github.com/fsnotify/fsnotify/pull/521
|
||||
[#524]: https://github.com/fsnotify/fsnotify/pull/524
|
||||
[#525]: https://github.com/fsnotify/fsnotify/pull/525
|
||||
[#526]: https://github.com/fsnotify/fsnotify/pull/526
|
||||
[#528]: https://github.com/fsnotify/fsnotify/pull/528
|
||||
[#537]: https://github.com/fsnotify/fsnotify/pull/537
|
||||
[#550]: https://github.com/fsnotify/fsnotify/pull/550
|
||||
[#572]: https://github.com/fsnotify/fsnotify/pull/572
|
||||
|
||||
1.6.0 - 2022-10-13
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `Event.Has()` and `Op.Has()` ([#477])
|
||||
|
||||
This makes checking events a lot easier; for example:
|
||||
|
||||
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
}
|
||||
|
||||
Becomes:
|
||||
|
||||
if event.Has(Write) && !event.Has(Remove) {
|
||||
}
|
||||
|
||||
- all: add cmd/fsnotify ([#463])
|
||||
|
||||
A command-line utility for testing and some examples.
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: don't ignore events for files that don't exist ([#260], [#470])
|
||||
|
||||
Previously the inotify watcher would call `os.Lstat()` to check if a file
|
||||
still exists before emitting events.
|
||||
|
||||
This was inconsistent with other platforms and resulted in inconsistent event
|
||||
reporting (e.g. when a file is quickly removed and re-created), and generally
|
||||
a source of confusion. It was added in 2013 to fix a memory leak that no
|
||||
longer exists.
|
||||
|
||||
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
|
||||
not watched ([#460])
|
||||
|
||||
- inotify: replace epoll() with non-blocking inotify ([#434])
|
||||
|
||||
Non-blocking inotify was not generally available at the time this library was
|
||||
written in 2014, but now it is. As a result, the minimum Linux version is
|
||||
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
|
||||
|
||||
- kqueue: don't check for events every 100ms ([#480])
|
||||
|
||||
The watcher would wake up every 100ms, even when there was nothing to do. Now
|
||||
it waits until there is something to do.
|
||||
|
||||
- macos: retry opening files on EINTR ([#475])
|
||||
|
||||
- kqueue: skip unreadable files ([#479])
|
||||
|
||||
kqueue requires a file descriptor for every file in a directory; this would
|
||||
fail if a file was unreadable by the current user. Now these files are simply
|
||||
skipped.
|
||||
|
||||
- windows: fix renaming a watched directory if the parent is also watched ([#370])
|
||||
|
||||
- windows: increase buffer size from 4K to 64K ([#485])
|
||||
|
||||
- windows: close file handle on Remove() ([#288])
|
||||
|
||||
- kqueue: put pathname in the error if watching a file fails ([#471])
|
||||
|
||||
- inotify, windows: calling Close() more than once could race ([#465])
|
||||
|
||||
- kqueue: improve Close() performance ([#233])
|
||||
|
||||
- all: various documentation additions and clarifications.
|
||||
|
||||
[#233]: https://github.com/fsnotify/fsnotify/pull/233
|
||||
[#260]: https://github.com/fsnotify/fsnotify/pull/260
|
||||
[#288]: https://github.com/fsnotify/fsnotify/pull/288
|
||||
[#370]: https://github.com/fsnotify/fsnotify/pull/370
|
||||
[#434]: https://github.com/fsnotify/fsnotify/pull/434
|
||||
[#460]: https://github.com/fsnotify/fsnotify/pull/460
|
||||
[#463]: https://github.com/fsnotify/fsnotify/pull/463
|
||||
[#465]: https://github.com/fsnotify/fsnotify/pull/465
|
||||
[#470]: https://github.com/fsnotify/fsnotify/pull/470
|
||||
[#471]: https://github.com/fsnotify/fsnotify/pull/471
|
||||
[#475]: https://github.com/fsnotify/fsnotify/pull/475
|
||||
[#477]: https://github.com/fsnotify/fsnotify/pull/477
|
||||
[#479]: https://github.com/fsnotify/fsnotify/pull/479
|
||||
[#480]: https://github.com/fsnotify/fsnotify/pull/480
|
||||
[#485]: https://github.com/fsnotify/fsnotify/pull/485
|
||||
|
||||
## [1.5.4] - 2022-04-25
|
||||
|
||||
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
||||
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
|
||||
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
|
||||
|
||||
## [1.5.3] - 2022-04-22
|
||||
|
||||
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
|
||||
|
||||
## [1.5.2] - 2022-04-21
|
||||
|
||||
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
|
||||
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
|
||||
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
|
||||
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
|
||||
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
|
||||
|
||||
## [1.5.1] - 2021-08-24
|
||||
|
||||
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
|
||||
|
||||
## [1.5.0] - 2021-08-20
|
||||
|
||||
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
|
||||
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
|
||||
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
|
||||
[#378](https://github.com/fsnotify/fsnotify/pull/378)
|
||||
[#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
||||
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
||||
|
||||
## [1.4.9] - 2020-03-11
|
||||
|
||||
* Move example usage to the readme #329. This may resolve #328.
|
||||
|
||||
## [1.4.8] - 2020-03-10
|
||||
|
||||
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
|
||||
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
|
||||
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
|
||||
* CI: Less verbosity (@nathany #267)
|
||||
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
|
||||
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
|
||||
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
|
||||
* CI: Add windows to travis matrix (@cpuguy83 #284)
|
||||
* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
|
||||
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
|
||||
* Linux: open files with close-on-exec (@linxiulei #273)
|
||||
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
|
||||
* Project: Add go.mod (@nathany #309)
|
||||
* Project: Revise editor config (@nathany #309)
|
||||
* Project: Update copyright for 2019 (@nathany #309)
|
||||
* CI: Drop go1.8 from CI matrix (@nathany #309)
|
||||
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
|
||||
|
||||
## [1.4.7] - 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||
* Docs: replace references to OS X with macOS
|
||||
|
||||
## [1.4.2] - 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## [1.4.1] - 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## [1.4.0] - 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## [1.3.1] - 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## [1.3.0] - 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## [1.2.10] - 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## [1.2.9] - 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## [1.2.8] - 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## [1.2.5] - 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## [1.2.1] - 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## [1.2.0] - 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## [1.1.1] - 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## [1.1.0] - 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [1.0.4] - 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## [1.0.3] - 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## [1.0.2] - 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## [1.0.0] - 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## [0.9.3] - 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [0.9.2] - 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## [0.9.1] - 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## [0.9.0] - 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## [0.8.12] - 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## [0.8.11] - 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## [0.8.10] - 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## [0.8.9] - 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## [0.8.8] - 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## [0.8.7] - 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## [0.8.6] - 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## [0.8.5] - 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## [0.8.4] - 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## [0.8.3] - 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## [0.8.2] - 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## [0.8.1] - 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## [0.8.0] - 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## [0.7.4] - 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## [0.7.3] - 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## [0.7.2] - 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## [0.7.1] - 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## [0.7.0] - 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## [0.6.0] - 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## [0.5.1] - 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## [0.5.0] - 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## [0.4.0] - 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## [0.3.0] - 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## [0.2.0] - 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## [0.1.0] - 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
145  vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md  generated  vendored
@@ -1,145 +0,0 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:

- To avoid "wasted" work, please discuss changes on the issue tracker first. You
  can just send PRs, but they may end up being rejected for one reason or the
  other.

- fsnotify is a cross-platform library, and changes must work reasonably well on
  all supported platforms.

- Changes will need to be compatible; old code should still compile, and the
  runtime behaviour can't change in ways that are likely to lead to problems for
  users.

Testing
-------
Just `go test ./...` runs all the tests; the CI runs this on all supported
platforms. Testing different platforms locally can be done with something like
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.

Use the `-short` flag to make the "stress test" run faster.

Writing new tests
-----------------
Scripts in the testdata directory allow creating test cases in a "shell-like"
syntax. The basic format is:

    script

    Output:
    desired output

For example:

    # Create a new empty file with some data.
    watch /
    echo data >/file

    Output:
        create  /file
        write   /file

Just create a new file to add a new test; select which tests to run with
`-run TestScript/[path]`.

script
------
The script is a "shell-like" script:

    cmd arg arg

Comments are supported with `#`:

    # Comment
    cmd arg arg # Comment

All operations are done in a temp directory; a path like "/foo" is rewritten to
"/tmp/TestFoo/foo".

Arguments can be quoted with `"` or `'`; there are no escapes and they're
functionally identical right now, but this may change in the future, so best to
assume shell-like rules.

    touch "/file with spaces"

End-of-line escapes with `\` are not supported.

### Supported commands

    watch path [ops]    # Watch the path, reporting events for it. Nothing is
                        # watched by default. Optionally a list of ops can be
                        # given, as with AddWith(path, WithOps(...)).
    unwatch path        # Stop watching the path.
    watchlist n         # Assert watchlist length.

    stop                # Stop running the script; for debugging.
    debug [yes/no]      # Enable/disable FSNOTIFY_DEBUG (tests are run in
                          parallel by default, so -parallel=1 is probably a good
                          idea).
    print [any strings] # Print text to stdout; for debugging.

    touch path
    mkdir [-p] dir
    ln -s target link   # Only ln -s supported.
    mkfifo path
    mknod dev path
    mv src dst
    rm [-r] path
    chmod mode path     # Octal only
    sleep time-in-ms

    cat path            # Read path (does nothing with the data; just reads it).
    echo str >>path     # Append "str" to "path".
    echo str >path      # Truncate "path" and write "str".

    require reason      # Skip the test if "reason" is true; "skip" and
    skip reason         # "require" behave identically; it supports both for
                        # readability. Possible reasons are:
                        #
                        #   always    Always skip this test.
                        #   symlink   Symlinks are supported (requires admin
                        #             permissions on Windows).
                        #   mkfifo    Platform doesn't support FIFO named sockets.
                        #   mknod     Platform doesn't support device nodes.


output
------
After `Output:` the desired output is given; this is indented by convention, but
that's not required.

The format of that is:

    # Comment
    event  path  # Comment

    system:
        event  path
    system2:
        event  path

Every event is one line, and any whitespace between the event and path is
ignored. The path can optionally be surrounded in ". Anything after a "#" is
ignored.

Platform-specific tests can be added after GOOS; for example:

    watch /
    touch /file

    Output:
        # Tested if nothing else matches
        create  /file

        # Windows-specific test.
        windows:
            write  /file

You can specify multiple platforms with a comma (e.g. "windows, linux:").
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).


[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
[integration_test.go]: /integration_test.go
25 vendor/github.com/fsnotify/fsnotify/LICENSE (generated, vendored)
@@ -1,25 +0,0 @@
Copyright © 2012 The Go Authors. All rights reserved.
Copyright © fsnotify Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.
* Neither the name of Google Inc. nor the names of its contributors may be used
  to endorse or promote products derived from this software without specific
  prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
182 vendor/github.com/fsnotify/fsnotify/README.md (generated, vendored)
@@ -1,182 +0,0 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
Windows, Linux, macOS, BSD, and illumos.

Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify

---

Platform support:

| Backend               | OS         | Status                                                     |
| :-------------------- | :--------- | :--------------------------------------------------------- |
| inotify               | Linux      | Supported                                                  |
| kqueue                | BSD, macOS | Supported                                                  |
| ReadDirectoryChangesW | Windows    | Supported                                                  |
| FEN                   | illumos    | Supported                                                  |
| fanotify              | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| FSEvents              | macOS      | [Needs support in x/sys/unix][fsevents]                    |
| USN Journals          | Windows    | [Needs support in x/sys/windows][usn]                      |
| Polling               | *All*      | [Not yet](https://github.com/fsnotify/fsnotify/issues/9)   |

Linux and illumos should include Android and Solaris, but these are currently
untested.

[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847

Usage
-----
A basic example:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Create new watcher.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Start listening for events.
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
				if event.Has(fsnotify.Write) {
					log.Println("modified file:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	// Add a path.
	err = watcher.Add("/tmp")
	if err != nil {
		log.Fatal(err)
	}

	// Block main goroutine forever.
	<-make(chan struct{})
}
```

Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
run with:

    % go run ./cmd/fsnotify

Further detailed documentation can be found in godoc:
https://pkg.go.dev/github.com/fsnotify/fsnotify

FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.

### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).

[#18]: https://github.com/fsnotify/fsnotify/issues/18
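Since subdirectories are not watched automatically, a common workaround is to walk the tree and add a watch for each directory yourself. The following is a minimal sketch of that approach, not part of the fsnotify API; it assumes the root path already exists and does not handle directories created after the walk finishes:

```go
package main

import (
	"io/fs"
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// addRecursive walks root and adds a watch for every directory found.
// Directories created later still need to be added by hand, e.g. when a
// Create event for a directory arrives.
func addRecursive(w *fsnotify.Watcher, root string) error {
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return w.Add(path)
		}
		return nil
	})
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := addRecursive(w, "/tmp"); err != nil {
		log.Fatal(err)
	}
	log.Println("watching:", w.WatchList())
}
```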

### Do I have to watch the Error and Event channels in a goroutine?
Yes. You can read both channels in the same goroutine using `select` (you don't
need a separate goroutine for both channels; see the example).

### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from the underlying OS to work. The current NFS and
SMB protocols do not provide network-level support for file notifications, and
neither do the /proc and /sys virtual filesystems.

This could be fixed with a polling watcher ([#9]), but it's not yet implemented.

[#9]: https://github.com/fsnotify/fsnotify/issues/9

### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.

Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).

[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
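As a rough illustration of "ignore Chmod events", an event loop can simply skip events whose only op is Chmod. This is a sketch of one way to do it, not prescribed usage; the Errors channel is omitted for brevity:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// handleEvents drains the Events channel and drops events that consist of
// nothing but a Chmod, which are rarely useful (Spotlight, AV scanners, …).
func handleEvents(w *fsnotify.Watcher) {
	for event := range w.Events {
		if event.Op == fsnotify.Chmod {
			continue // attribute-only change; ignore
		}
		log.Println("event:", event)
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	handleEvents(w) // blocks until the watcher is closed
}
```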

### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: they write to a
temporary file which is then moved to the destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
file no longer exists.

The upshot of this is that a power failure or crash won't leave a half-written
file.

Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
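Below is a minimal sketch of that pattern: watch the directory that contains the file and filter events by name. The target path is hypothetical, and the upstream `cmd/fsnotify/file.go` example is more complete:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// The file we actually care about; we watch its parent directory so
	// that atomic saves (write temp file + rename) keep being reported.
	target := "/tmp/config.yaml" // hypothetical path

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for event := range w.Events {
		// Ignore events for other files in the same directory.
		if filepath.Clean(event.Name) != filepath.Clean(target) {
			continue
		}
		if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
			log.Println("target changed:", event)
		}
	}
}
```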

Platform-specific notes
-----------------------
### Linux
When a file is removed a REMOVE event won't be emitted until all file
descriptors are closed; it will emit a CHMOD instead:

    fp := os.Open("file")
    os.Remove("file")        // CHMOD
    fp.Close()               // REMOVE

This is the event that inotify sends, so not much can be changed about this.

The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
the number of watches per user, and `fs.inotify.max_user_instances` specifies
the maximum number of inotify instances per user. Every Watcher you create is an
"instance", and every path you add is a "watch".

These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
`/proc/sys/fs/inotify/max_user_instances`.

To increase them you can use `sysctl` or write the value to the proc file:

    # The default values on Linux 5.18
    sysctl fs.inotify.max_user_watches=124983
    sysctl fs.inotify.max_user_instances=128

To make the changes persist on reboot edit `/etc/sysctl.conf` or
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
distro's documentation):

    fs.inotify.max_user_watches=124983
    fs.inotify.max_user_instances=128

Reaching the limit will result in a "no space left on device" or "too many open
files" error.
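A program can detect that it has hit the watch limit by inspecting the error returned from `Add`. The sketch below assumes the failing `inotify_add_watch` surfaces as ENOSPC on Linux; the exact wrapping may differ depending on the backend, so treat this as an illustration rather than a guaranteed contract:

```go
package main

import (
	"errors"
	"log"
	"syscall"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil {
		// On Linux, hitting fs.inotify.max_user_watches usually shows up
		// as ENOSPC ("no space left on device"). This check is a sketch;
		// the error may be wrapped differently on other platforms.
		if errors.Is(err, syscall.ENOSPC) {
			log.Fatal("inotify watch limit reached; raise fs.inotify.max_user_watches")
		}
		log.Fatal(err)
	}
}
```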

### kqueue (macOS, all BSD systems)
kqueue requires opening a file descriptor for every file that's being watched;
so if you're watching a directory with five files then that's six file
descriptors. You will run into your system's "max open files" limit faster on
these platforms.

The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
467 vendor/github.com/fsnotify/fsnotify/backend_fen.go (generated, vendored)
@@ -1,467 +0,0 @@
|
||||
//go:build solaris
|
||||
|
||||
// FEN backend for illumos (supported) and Solaris (untested, but should work).
|
||||
//
|
||||
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fen struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
mu sync.Mutex
|
||||
port *unix.EventPort
|
||||
dirs map[string]Op // Explicitly watched directories
|
||||
watches map[string]Op // Explicitly watched non-directories
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
w := &fen{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
dirs: make(map[string]Op),
|
||||
watches: make(map[string]Op),
|
||||
}
|
||||
|
||||
var err error
|
||||
w.port, err = unix.NewEventPort()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *fen) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
return w.port.Close()
|
||||
}
|
||||
|
||||
func (w *fen) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *fen) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
// Currently we resolve symlinks that were explicitly requested to be
|
||||
// watched. Otherwise we would use LStat here.
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Associate all files in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, true, w.associateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.dirs[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.associateFile(name, stat, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if !w.port.PathIsWatched(name) {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
// The user has expressed an intent. Immediately remove this name from
|
||||
// whichever watch list it might be in. If it's not in there the delete
|
||||
// doesn't cause harm.
|
||||
w.mu.Lock()
|
||||
delete(w.watches, name)
|
||||
delete(w.dirs, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove associations for every file in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, false, w.dissociateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.port.DissociatePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||
func (w *fen) readEvents() {
|
||||
// If this function returns, the watcher has been closed and we can close
|
||||
// these channels
|
||||
defer func() {
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
pevents := make([]unix.PortEvent, 8)
|
||||
for {
|
||||
count, err := w.port.Get(pevents, 1, nil)
|
||||
if err != nil && err != unix.ETIME {
|
||||
// Interrupted system call (count should be 0) ignore and continue
|
||||
if errors.Is(err, unix.EINTR) && count == 0 {
|
||||
continue
|
||||
}
|
||||
// Get failed because we called w.Close()
|
||||
if errors.Is(err, unix.EBADF) && w.isClosed() {
|
||||
return
|
||||
}
|
||||
// There was an error not caused by calling w.Close()
|
||||
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p := pevents[:count]
|
||||
for _, pevent := range p {
|
||||
if pevent.Source != unix.PORT_SOURCE_FILE {
|
||||
// Event from unexpected source received; should never happen.
|
||||
if !w.sendError(errors.New("Event from unexpected source received")) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(pevent.Path, pevent.Events)
|
||||
}
|
||||
|
||||
err = w.handleEvent(&pevent)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle all children of the directory.
|
||||
for _, entry := range files {
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// And finally handle the directory itself.
|
||||
return handler(path, stat, follow)
|
||||
}
|
||||
|
||||
// handleEvent might need to emit more than one fsnotify event if the events
|
||||
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||
// had the attributes changed between when the association was created and the
|
||||
// when event was returned)
|
||||
func (w *fen) handleEvent(event *unix.PortEvent) error {
|
||||
var (
|
||||
events = event.Events
|
||||
path = event.Path
|
||||
fmode = event.Cookie.(os.FileMode)
|
||||
reRegister = true
|
||||
)
|
||||
|
||||
w.mu.Lock()
|
||||
_, watchedDir := w.dirs[path]
|
||||
_, watchedPath := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
isWatched := watchedDir || watchedPath
|
||||
|
||||
if events&unix.FILE_DELETE != 0 {
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_FROM != 0 {
|
||||
if !w.sendEvent(Event{Name: path, Op: Rename}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the new file name
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_TO != 0 {
|
||||
// We don't report a Rename event for this case, because Rename events
|
||||
// are interpreted as referring to the _old_ name of the file, and in
|
||||
// this case the event would refer to the new name of the file. This
|
||||
// type of rename event is not supported by fsnotify.
|
||||
|
||||
// inotify reports a Remove event in this case, so we simulate this
|
||||
// here.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the file that was removed
|
||||
reRegister = false
|
||||
}
|
||||
|
||||
// The file is gone, nothing left to do.
|
||||
if !reRegister {
|
||||
if watchedDir {
|
||||
w.mu.Lock()
|
||||
delete(w.dirs, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
if watchedPath {
|
||||
w.mu.Lock()
|
||||
delete(w.watches, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we didn't get a deletion the file still exists and we're going to have
|
||||
// to watch it again. Let's Stat it now so that we can compare permissions
|
||||
// and have what we need to continue watching the file
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
// This is unexpected, but we should still emit an event. This happens
|
||||
// most often on "rm -r" of a subdirectory inside a watched directory We
|
||||
// get a modify event of something happening inside, but by the time we
|
||||
// get here, the sudirectory is already gone. Clearly we were watching
|
||||
// this path but now it is gone. Let's tell the user that it was
|
||||
// removed.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Suppress extra write events on removed directories; they are not
|
||||
// informative and can be confusing.
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolve symlinks that were explicitly watched as we would have at Add()
|
||||
// time. this helps suppress spurious Chmod events on watched symlinks
|
||||
if isWatched {
|
||||
stat, err = os.Stat(path)
|
||||
if err != nil {
|
||||
// The symlink still exists, but the target is gone. Report the
|
||||
// Remove similar to above.
|
||||
if !w.sendEvent(Event{Name: path, Op: Remove}) {
|
||||
return nil
|
||||
}
|
||||
// Don't return the error
|
||||
}
|
||||
}
|
||||
|
||||
if events&unix.FILE_MODIFIED != 0 {
|
||||
if fmode.IsDir() && watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(Event{Name: path, Op: Write}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||
// Only send Chmod if perms changed
|
||||
if stat.Mode().Perm() != fmode.Perm() {
|
||||
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stat != nil {
|
||||
// If we get here, it means we've hit an event above that requires us to
|
||||
// continue watching the file or directory
|
||||
err := w.associateFile(path, stat, isWatched)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// Path may have been removed since the stat.
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen, as
|
||||
// everything else should still be watched.
|
||||
func (w *fen) updateDirectory(path string) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
// Directory no longer exists: probably just deleted since we got the
|
||||
// event.
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range files {
|
||||
path := filepath.Join(path, entry.Name())
|
||||
if w.port.PathIsWatched(path) {
|
||||
continue
|
||||
}
|
||||
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.associateFile(path, finfo, false)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// File may have disappeared between getting the dir listing and
|
||||
// adding the port: that's okay to ignore.
|
||||
continue
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
// This is primarily protecting the call to AssociatePath but it is
|
||||
// important and intentional that the call to PathIsWatched is also
|
||||
// protected by this mutex. Without this mutex, AssociatePath has been seen
|
||||
// to error out that the path is already associated.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.port.PathIsWatched(path) {
|
||||
// Remove the old association in favor of this one If we get ENOENT,
|
||||
// then while the x/sys/unix wrapper still thought that this path was
|
||||
// associated, the underlying event port did not. This call will have
|
||||
// cleared up that discrepancy. The most likely cause is that the event
|
||||
// has fired but we haven't processed it yet.
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil && !errors.Is(err, unix.ENOENT) {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
var events int
|
||||
if !follow {
|
||||
// Watch symlinks themselves rather than their targets unless this entry
|
||||
// is explicitly watched.
|
||||
events |= unix.FILE_NOFOLLOW
|
||||
}
|
||||
if true { // TODO: implement withOps()
|
||||
events |= unix.FILE_MODIFIED
|
||||
}
|
||||
if true {
|
||||
events |= unix.FILE_ATTRIB
|
||||
}
|
||||
err := w.port.AssociatePath(path, stat, events, stat.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
if !w.port.PathIsWatched(path) {
|
||||
return nil
|
||||
}
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches)+len(w.dirs))
|
||||
for pathname := range w.dirs {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
func (w *fen) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
583 vendor/github.com/fsnotify/fsnotify/backend_inotify.go (generated, vendored)
@@ -1,583 +0,0 @@
|
||||
//go:build linux && !appengine
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type inotify struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||
fd int
|
||||
inotifyFile *os.File
|
||||
watches *watches
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
|
||||
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||
// between the two MOVED_* events (including other MOVED_* ones).
|
||||
//
|
||||
// A second issue is that moving a file outside the watched directory will
|
||||
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||
//
|
||||
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||
// Ten items should be more than enough for our purpose, and a loop over
|
||||
// such a short array is faster than a map access anyway (not that it hugely
|
||||
// matters since we're talking about hundreds of ns at the most, but still).
|
||||
cookies [10]koekje
|
||||
cookieIndex uint8
|
||||
cookiesMu sync.Mutex
|
||||
}
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
wd map[uint32]*watch // wd → watch
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
recurse bool // Recursion with ./...?
|
||||
}
|
||||
koekje struct {
|
||||
cookie uint32
|
||||
path string
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[uint32]*watch),
|
||||
path: make(map[string]uint32),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
|
||||
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
|
||||
func (w *watches) len() int { return len(w.wd) }
|
||||
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
|
||||
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
|
||||
|
||||
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||
path, recurse := recursivePath(path)
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||
}
|
||||
|
||||
watch := w.wd[wd]
|
||||
if recurse && !watch.recurse {
|
||||
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
if !watch.recurse {
|
||||
return []uint32{wd}, nil
|
||||
}
|
||||
|
||||
wds := make([]uint32, 0, 8)
|
||||
wds = append(wds, wd)
|
||||
for p, rwd := range w.path {
|
||||
if strings.HasPrefix(p, path) {
|
||||
delete(w.path, p)
|
||||
delete(w.wd, rwd)
|
||||
wds = append(wds, rwd)
|
||||
}
|
||||
}
|
||||
return wds, nil
|
||||
}
|
||||
|
||||
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||
var existing *watch
|
||||
wd, ok := w.path[path]
|
||||
if ok {
|
||||
existing = w.wd[wd]
|
||||
}
|
||||
|
||||
upd, err := f(existing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if upd != nil {
|
||||
w.wd[upd.wd] = upd
|
||||
w.path[upd.path] = upd.wd
|
||||
|
||||
if upd.wd != wd {
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &inotify{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: newWatches(),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *inotify) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
err := w.inotifyFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
<-w.doneResp // Wait for readEvents() to finish.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), path)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
add := func(path string, with withOpts, recurse bool) error {
|
||||
var flags uint32
|
||||
if with.noFollow {
|
||||
flags |= unix.IN_DONT_FOLLOW
|
||||
}
|
||||
if with.op.Has(Create) {
|
||||
flags |= unix.IN_CREATE
|
||||
}
|
||||
if with.op.Has(Write) {
|
||||
flags |= unix.IN_MODIFY
|
||||
}
|
||||
if with.op.Has(Remove) {
|
||||
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
}
|
||||
if with.op.Has(Rename) {
|
||||
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||
}
|
||||
if with.op.Has(Chmod) {
|
||||
flags |= unix.IN_ATTRIB
|
||||
}
|
||||
if with.op.Has(xUnportableOpen) {
|
||||
flags |= unix.IN_OPEN
|
||||
}
|
||||
if with.op.Has(xUnportableRead) {
|
||||
flags |= unix.IN_ACCESS
|
||||
}
|
||||
if with.op.Has(xUnportableCloseWrite) {
|
||||
flags |= unix.IN_CLOSE_WRITE
|
||||
}
|
||||
if with.op.Has(xUnportableCloseRead) {
|
||||
flags |= unix.IN_CLOSE_NOWRITE
|
||||
}
|
||||
return w.register(path, flags, recurse)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
path, recurse := recursivePath(path)
|
||||
if recurse {
|
||||
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
if root == path {
|
||||
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a Create event when adding new directory from a recursive
|
||||
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||
// directories will be created before we can set up watchers on the
|
||||
// subdirectories, so only "one" would be sent as a Create event and
|
||||
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||
// problem).
|
||||
if with.sendCreate && root != path {
|
||||
w.sendEvent(Event{Name: root, Op: Create})
|
||||
}
|
||||
|
||||
return add(root, with, true)
|
||||
})
|
||||
}
|
||||
|
||||
return add(path, with, false)
|
||||
}
|
||||
|
||||
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if e, ok := w.watches.wd[uint32(wd)]; ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: path,
|
||||
flags: flags,
|
||||
recurse: recurse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
existing.wd = uint32(wd)
|
||||
existing.flags = flags
|
||||
return existing, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (w *inotify) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.remove(filepath.Clean(name))
|
||||
}
|
||||
|
||||
func (w *inotify) remove(name string) error {
|
||||
wds, err := w.watches.removePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, wd := range wds {
|
||||
_, err := unix.InotifyRmWatch(w.fd, wd)
|
||||
if err != nil {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every
|
||||
// case; the only two possible errors are:
|
||||
//
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of
|
||||
// any kind.
|
||||
//
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is
|
||||
// not a valid watch descriptor. Watch descriptors are invalidated
|
||||
// when they are removed explicitly or implicitly; explicitly by
|
||||
// inotify_rm_watch, implicitly when the file they are watching is
|
||||
// deleted.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *inotify) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
entries := make([]string, 0, w.watches.len())
|
||||
for pathname := range w.watches.path {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *inotify) readEvents() {
|
||||
defer func() {
|
||||
close(w.doneResp)
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
for {
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := w.inotifyFile.Read(buf[:])
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrClosed) {
|
||||
return
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
err := errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
if n == 0 {
|
||||
err = io.EOF // If EOF is received. This should really never happen.
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't know how many events we just read into the buffer While the
|
||||
// offset points to at least one whole event.
|
||||
var offset uint32
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point to the event in the buffer.
|
||||
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if !w.sendError(ErrEventOverflow) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ev, ok := w.handleEvent(inEvent, &buf, offset)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if !w.sendEvent(ev) {
|
||||
return
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + inEvent.Len
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
/// If the event happened to the watched directory or the watched file, the
|
||||
/// kernel doesn't append the filename to the event, but we would like to
|
||||
/// always fill the the "Name" field with a valid filename. We retrieve the
|
||||
/// path of the watch from the "paths" map.
|
||||
///
|
||||
/// Can be nil if Remove() was called in another goroutine for this path
|
||||
/// inbetween reading the events from the kernel and reading the internal
|
||||
/// state. Not much we can do about it, so just skip. See #616.
|
||||
watch := w.watches.byWd(uint32(inEvent.Wd))
|
||||
if watch == nil {
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
var (
|
||||
name = watch.path
|
||||
nameLen = uint32(inEvent.Len)
|
||||
)
|
||||
if nameLen > 0 {
|
||||
/// Point "bytes" at the first byte of the filename
|
||||
bb := *buf
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
|
||||
}
|
||||
|
||||
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
|
||||
w.watches.remove(watch)
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch)
|
||||
}
|
||||
|
||||
// We can't really update the state when a watched path is moved; only
|
||||
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
|
||||
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
if watch.recurse { // Do nothing
|
||||
return Event{}, true
|
||||
}
|
||||
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip if we're watching both this path and the parent; the parent will
|
||||
/// already send a delete so no need to do it twice.
|
||||
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
|
||||
_, ok := w.watches.path[filepath.Dir(watch.path)]
|
||||
if ok {
|
||||
return Event{}, true
|
||||
}
|
||||
}
|
||||
|
||||
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
|
||||
// Need to update watch path for recurse.
|
||||
if watch.recurse {
|
||||
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
|
||||
/// New directory created: set up watch on it.
|
||||
if isDir && ev.Has(Create) {
|
||||
err := w.register(ev.Name, watch.flags, true)
|
||||
if !w.sendError(err) {
|
||||
return Event{}, false
|
||||
}
|
||||
|
||||
// This was a directory rename, so we need to update all the
|
||||
// children.
|
||||
//
|
||||
// TODO: this is of course pretty slow; we should use a better data
|
||||
// structure for storing all of this, e.g. store children in the
|
||||
// watch. I have some code for this in my kqueue refactor we can use
|
||||
// in the future. For now I'm okay with this as it's not publicly
|
||||
// available. Correctness first, performance second.
|
||||
if ev.renamedFrom != "" {
|
||||
for k, ww := range w.watches.wd {
|
||||
if k == watch.wd || ww.path == ev.Name {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(ww.path, ev.renamedFrom) {
|
||||
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
|
||||
w.watches.wd[k] = ww
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ev, true
|
||||
}
|
||||
|
||||
func (w *inotify) isRecursive(path string) bool {
|
||||
ww := w.watches.byPath(path)
|
||||
if ww == nil { // path could be a file, so also check the Dir.
|
||||
ww = w.watches.byPath(filepath.Dir(path))
|
||||
}
|
||||
return ww != nil && ww.recurse
|
||||
}
|
||||
|
||||
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_OPEN == unix.IN_OPEN {
|
||||
e.Op |= xUnportableOpen
|
||||
}
|
||||
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
|
||||
e.Op |= xUnportableRead
|
||||
}
|
||||
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
|
||||
e.Op |= xUnportableCloseWrite
|
||||
}
|
||||
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
|
||||
e.Op |= xUnportableCloseRead
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
|
||||
if cookie != 0 {
|
||||
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
w.cookiesMu.Lock()
|
||||
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
|
||||
w.cookieIndex++
|
||||
if w.cookieIndex > 9 {
|
||||
w.cookieIndex = 0
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
w.cookiesMu.Lock()
|
||||
var prev string
|
||||
for _, c := range w.cookies {
|
||||
if c.cookie == cookie {
|
||||
prev = c.path
|
||||
break
|
||||
}
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
e.renamedFrom = prev
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (w *inotify) xSupports(op Op) bool {
|
||||
return true // Supports everything.
|
||||
}
|
||||
|
||||
func (w *inotify) state() {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
for wd, ww := range w.watches.wd {
|
||||
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
|
||||
}
|
||||
}
|
||||
705 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go (generated, vendored)
@@ -1,705 +0,0 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type kqueue struct {
|
||||
*shared
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
closepipe [2]int // Pipe used for closing kq.
|
||||
watches *watches
|
||||
}
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
mu sync.RWMutex
|
||||
wd map[int]watch // wd → watch
|
||||
path map[string]int // pathname → wd
|
||||
byDir map[string]map[int]struct{} // dirname(path) → wd
|
||||
seen map[string]struct{} // Keep track of if we know this file exists.
|
||||
byUser map[string]struct{} // Watches added with Watcher.Add()
|
||||
}
|
||||
watch struct {
|
||||
wd int
|
||||
name string
|
||||
linkName string // In case of links; name is the target, and this is the link.
|
||||
isDir bool
|
||||
dirFlags uint32
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[int]watch),
|
||||
path: make(map[string]int),
|
||||
byDir: make(map[string]map[int]struct{}),
|
||||
seen: make(map[string]struct{}),
|
||||
byUser: make(map[string]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) listPaths(userOnly bool) []string {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
if userOnly {
|
||||
l := make([]string, 0, len(w.byUser))
|
||||
for p := range w.byUser {
|
||||
l = append(l, p)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
l := make([]string, 0, len(w.path))
|
||||
for p := range w.path {
|
||||
l = append(l, p)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (w *watches) watchesInDir(path string) []string {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
l := make([]string, 0, 4)
|
||||
for fd := range w.byDir[path] {
|
||||
info := w.wd[fd]
|
||||
if _, ok := w.byUser[info.name]; !ok {
|
||||
l = append(l, info.name)
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Mark path as added by the user.
|
||||
func (w *watches) addUserWatch(path string) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.byUser[path] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) addLink(path string, fd int) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.path[path] = fd
|
||||
w.seen[path] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) add(path, linkPath string, fd int, isDir bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.path[path] = fd
|
||||
w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
byDir, ok := w.byDir[parent]
|
||||
if !ok {
|
||||
byDir = make(map[int]struct{}, 1)
|
||||
w.byDir[parent] = byDir
|
||||
}
|
||||
byDir[fd] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) byWd(fd int) (watch, bool) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
info, ok := w.wd[fd]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) (watch, bool) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
info, ok := w.wd[w.path[path]]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (w *watches) updateDirFlags(path string, flags uint32) bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
fd, ok := w.path[path]
|
||||
if !ok { // Already deleted: don't re-set it here.
|
||||
return false
|
||||
}
|
||||
info := w.wd[fd]
|
||||
info.dirFlags = flags
|
||||
w.wd[fd] = info
|
||||
return true
|
||||
}
|
||||
|
||||
func (w *watches) remove(fd int, path string) bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
isDir := w.wd[fd].isDir
|
||||
delete(w.path, path)
|
||||
delete(w.byUser, path)
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
delete(w.byDir[parent], fd)
|
||||
|
||||
if len(w.byDir[parent]) == 0 {
|
||||
delete(w.byDir, parent)
|
||||
}
|
||||
|
||||
delete(w.wd, fd)
|
||||
delete(w.seen, path)
|
||||
return isDir
|
||||
}
|
||||
|
||||
func (w *watches) markSeen(path string, exists bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if exists {
|
||||
w.seen[path] = struct{}{}
|
||||
} else {
|
||||
delete(w.seen, path)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) seenBefore(path string) bool {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
_, ok := w.seen[path]
|
||||
return ok
|
||||
}
|
||||
|
||||
var defaultBufferSize = 0
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
kq, closepipe, err := newKqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &kqueue{
|
||||
shared: newShared(ev, errs),
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
kq: kq,
|
||||
closepipe: closepipe,
|
||||
watches: newWatches(),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// newKqueue creates a new kernel event queue and returns a descriptor.
|
||||
//
|
||||
// This registers a new event on closepipe, which will trigger an event when
|
||||
// it's closed. This way we can use kevent() without timeout/polling; without
|
||||
// the closepipe, it would block forever and we wouldn't be able to stop it at
|
||||
// all.
|
||||
func newKqueue() (kq int, closepipe [2]int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if err != nil {
|
||||
return kq, closepipe, err
|
||||
}
|
||||
|
||||
// Register the close pipe.
|
||||
err = unix.Pipe(closepipe[:])
|
||||
if err != nil {
|
||||
unix.Close(kq)
|
||||
return kq, closepipe, err
|
||||
}
|
||||
unix.CloseOnExec(closepipe[0])
|
||||
unix.CloseOnExec(closepipe[1])
|
||||
|
||||
// Register changes to listen on the closepipe.
|
||||
changes := make([]unix.Kevent_t, 1)
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
|
||||
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
|
||||
|
||||
ok, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if ok == -1 {
|
||||
unix.Close(kq)
|
||||
unix.Close(closepipe[0])
|
||||
unix.Close(closepipe[1])
|
||||
return kq, closepipe, err
|
||||
}
|
||||
return kq, closepipe, nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Close() error {
|
||||
if w.shared.close() {
|
||||
return nil
|
||||
}
|
||||
|
||||
pathsToRemove := w.watches.listPaths(false)
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *kqueue) AddWith(name string, opts ...addOpt) error {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
_, err := w.addWatch(name, noteAllEvents, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches.addUserWatch(name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Remove(name string) error {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
return w.remove(name, true)
|
||||
}
|
||||
|
||||
func (w *kqueue) remove(name string, unwatchFiles bool) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
info, ok := w.watches.byPath(name)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(info.wd)
|
||||
|
||||
isDir := w.watches.remove(info.wd, name)
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if unwatchFiles && isDir {
|
||||
pathsToRemove := w.watches.watchesInDir(name)
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error to
|
||||
// the user, as that will just confuse them with an error about a
|
||||
// path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
return w.watches.listPaths(true)
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// addWatch adds name to the watched file set; the flags are interpreted as
|
||||
// described in kevent(2).
|
||||
//
|
||||
// Returns the real path to the file which was added, with symlinks resolved.
|
||||
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
|
||||
if w.isClosed() {
|
||||
return "", ErrClosed
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
|
||||
info, alreadyWatching := w.watches.byPath(name)
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets or named pipes.
|
||||
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow symlinks, but only for paths added with Add(), and not paths
|
||||
// we're adding from internalWatch from a listdir.
|
||||
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
link, err := os.Readlink(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !filepath.IsAbs(link) {
|
||||
link = filepath.Join(filepath.Dir(name), link)
|
||||
}
|
||||
|
||||
_, alreadyWatching = w.watches.byPath(link)
|
||||
if alreadyWatching {
|
||||
// Add to watches so we don't get spurious Create events later
|
||||
// on when we diff the directories.
|
||||
w.watches.addLink(name, 0)
|
||||
return link, nil
|
||||
}
|
||||
|
||||
info.linkName = name
|
||||
name = link
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
||||
// See #354, and Go issues 11180 and 39237.
|
||||
for {
|
||||
info.wd, err = unix.Open(name, openMode, 0)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if errors.Is(err, unix.EINTR) {
|
||||
continue
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
info.isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
|
||||
if err != nil {
|
||||
unix.Close(info.wd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.watches.add(name, info.linkName, info.wd, info.isDir)
|
||||
}
|
||||
|
||||
// Watch the directory if it has not been watched before, or if it was
|
||||
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
if info.isDir {
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
if !w.watches.updateDirFlags(name, flags) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
if watchDir {
|
||||
d := name
|
||||
if info.linkName != "" {
|
||||
d = info.linkName
|
||||
}
|
||||
if err := w.watchDirectoryFiles(d); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *kqueue) readEvents() {
|
||||
defer func() {
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
_ = unix.Close(w.kq)
|
||||
unix.Close(w.closepipe[0])
|
||||
}()
|
||||
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
for {
|
||||
kevents, err := w.read(eventBuffer)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, kevent := range kevents {
|
||||
var (
|
||||
wd = int(kevent.Ident)
|
||||
mask = uint32(kevent.Fflags)
|
||||
)
|
||||
|
||||
// Shut down the loop when the pipe is closed, but only after all
|
||||
// other events have been processed.
|
||||
if wd == w.closepipe[0] {
|
||||
return
|
||||
}
|
||||
|
||||
path, ok := w.watches.byWd(wd)
|
||||
if debug {
|
||||
internal.Debug(path.name, &kevent)
|
||||
}
|
||||
|
||||
// On macOS it seems that sometimes an event with Ident=0 is
|
||||
// delivered, and no other flags/information beyond that, even
|
||||
// though we never saw such a file descriptor. For example in
|
||||
// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
|
||||
//
|
||||
// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
|
||||
// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||
// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||
//
|
||||
// The first is a normal event, the second with Ident 0. No error
|
||||
// flag, no data, no ... nothing.
|
||||
//
|
||||
// I read a bit through bsd/kern_event.c from the xnu source, but I
|
||||
// don't really see an obvious location where this is triggered –
|
||||
// this doesn't seem intentional, but idk...
|
||||
//
|
||||
// Technically fd 0 is a valid descriptor, so only skip it if
|
||||
// there's no path, and if we're on macOS.
|
||||
if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
|
||||
continue
|
||||
}
|
||||
|
||||
event := w.newEvent(path.name, path.linkName, mask)
|
||||
|
||||
if event.Has(Rename) || event.Has(Remove) {
|
||||
w.remove(event.Name, false)
|
||||
w.watches.markSeen(event.Name, false)
|
||||
}
|
||||
|
||||
if path.isDir && event.Has(Write) && !event.Has(Remove) {
|
||||
w.dirChange(event.Name)
|
||||
} else if !w.sendEvent(event) {
|
||||
return
|
||||
}
|
||||
|
||||
if event.Has(Remove) {
|
||||
// Look for a file that may have overwritten this; for example,
|
||||
// mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
_, found := w.watches.byPath(fileDir)
|
||||
if found {
|
||||
// TODO: this branch is never triggered in any test.
|
||||
// Added in d6220df (2012).
|
||||
// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
|
||||
//
|
||||
// I don't really get how this can be triggered either.
|
||||
// And it wasn't triggered in the patch that added it,
|
||||
// either.
|
||||
//
|
||||
// Original also had a comment:
|
||||
// make sure the directory exists before we watch for
|
||||
// changes. When we do a recursive watch and perform
|
||||
// rm -rf, the parent directory might have gone
|
||||
// missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the
|
||||
// parent directory.
|
||||
err := w.dirChange(fileDir)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
path := filepath.Clean(event.Name)
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
err := w.sendCreateIfNew(path, fi)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
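readEvents above shuts down when it sees an event for w.closepipe[0]: the read end of a pipe is registered with kqueue so that closing the write end wakes a blocked Kevent call. A hedged sketch of that wakeup trick under the same BSD/macOS assumption; the helper name is illustrative:

// Sketch of the close-pipe wakeup used by the kqueue event loop above.
package kqueueutil

import "golang.org/x/sys/unix"

// newWakeupPipe registers the read end of a fresh pipe with the given kqueue
// descriptor. Closing the returned write end later makes any goroutine that
// is blocked in unix.Kevent return, so it can notice shutdown and exit.
func newWakeupPipe(kq int) (readFD, writeFD int, err error) {
	var p [2]int
	if err := unix.Pipe(p[:]); err != nil {
		return 0, 0, err
	}
	ev := make([]unix.Kevent_t, 1)
	unix.SetKevent(&ev[0], p[0], unix.EVFILT_READ, unix.EV_ADD|unix.EV_ENABLE)
	if _, err := unix.Kevent(kq, ev, nil, nil); err != nil {
		unix.Close(p[0])
		unix.Close(p[1])
		return 0, 0, err
	}
	return p[0], p[1], nil
}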
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.

|
||||
func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if linkName != "" {
|
||||
// If the user watched "/path/link" then emit events as "/path/link"
|
||||
// rather than "/path/target".
|
||||
e.Name = linkName
|
||||
}
|
||||
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
// No point sending a write and delete event at the same time: if it's gone,
|
||||
// then it's gone.
|
||||
if e.Op.Has(Write) && e.Op.Has(Remove) {
|
||||
e.Op &^= Write
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// watchDirectoryFiles watches every file in a directory, to mimic inotify when adding a watch on a directory
|
||||
func (w *kqueue) watchDirectoryFiles(dirPath string) error {
|
||||
files, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
path := filepath.Join(dirPath, f.Name())
|
||||
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
|
||||
cleanPath, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
// No permission to read the file; that's not a problem: just skip.
|
||||
// But do add it to w.fileExists to prevent it from being picked up
|
||||
// as a "new" file later (it still shows up in the directory
|
||||
// listing).
|
||||
switch {
|
||||
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
||||
cleanPath = filepath.Clean(path)
|
||||
default:
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
w.watches.markSeen(cleanPath, true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search the directory for new files and send an event for them.
|
||||
//
|
||||
// This functionality is to have the BSD watcher match inotify, which sends
|
||||
// a create event for files created in a watched directory.
|
||||
func (w *kqueue) dirChange(dir string) error {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
// Directory no longer exists: we can ignore this safely. kqueue will
|
||||
// still give us the correct events.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
|
||||
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||
if err != nil {
|
||||
// Don't need to send an error if this file isn't readable.
|
||||
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
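dirChange above re-reads the directory after a write notification and reports entries it has not seen before, which is how the kqueue backend approximates inotify's per-file Create events. A generic sketch of that diff-the-directory idea; the seen map and callback are placeholders, not fsnotify APIs:

// Sketch of synthesizing Create events by diffing a directory listing.
package kqueueutil

import (
	"os"
	"path/filepath"
)

// reportNewEntries lists dir and reports any entry not already in seen as a
// new file via emitCreate, marking it so it is only reported once.
func reportNewEntries(dir string, seen map[string]bool, emitCreate func(path string)) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // directory vanished; the pending delete event covers it
		}
		return err
	}
	for _, e := range entries {
		p := filepath.Join(dir, e.Name())
		if !seen[p] {
			seen[p] = true
			emitCreate(p)
		}
	}
	return nil
}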
|
||||
|
||||
// Send a create event if the file isn't already being tracked, and start
|
||||
// watching this file.
|
||||
func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
|
||||
if !w.watches.seenBefore(path) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Like watchDirectoryFiles, but without doing another ReadDir.
|
||||
path, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches.markSeen(path, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
|
||||
if fi.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories, but preserve
|
||||
// the flags used if currently watching subdirectory
|
||||
info, _ := w.watches.byPath(name)
|
||||
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
|
||||
}
|
||||
|
||||
// Watch file to mimic Linux inotify.
|
||||
return w.addWatch(name, noteAllEvents, true)
|
||||
}
|
||||
|
||||
// Register events with the queue.
|
||||
func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// Register the events.
|
||||
success, err := unix.Kevent(w.kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
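register above wraps a single Kevent call that adds EVFILT_VNODE filters for a batch of descriptors. A standalone sketch of the same registration for one descriptor, again assuming BSD/macOS and golang.org/x/sys/unix; watchFD is a hypothetical helper:

// Sketch of registering one vnode watch with kqueue, mirroring register above.
package kqueueutil

import "golang.org/x/sys/unix"

// watchFD adds an EVFILT_VNODE filter for fd on the kqueue kq. EV_ADD
// registers the event, EV_CLEAR resets its state after delivery, and
// EV_ENABLE allows it to fire; fflags selects which vnode changes to report.
func watchFD(kq, fd int, fflags uint32) error {
	changes := make([]unix.Kevent_t, 1)
	unix.SetKevent(&changes[0], fd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
	changes[0].Fflags = fflags // e.g. unix.NOTE_WRITE | unix.NOTE_DELETE | unix.NOTE_RENAME
	_, err := unix.Kevent(kq, changes, nil, nil)
	return err
}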
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(w.kq, nil, events, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
func (w *kqueue) xSupports(op Op) bool {
|
||||
//if runtime.GOOS == "freebsd" {
|
||||
// return true // Supports everything.
|
||||
//}
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/backend_other.go (generated, vendored): 22 lines removed
@@ -1,22 +0,0 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)

package fsnotify

import "errors"

type other struct {
	Events chan Event
	Errors chan error
}

var defaultBufferSize = 0

func newBackend(ev chan Event, errs chan error) (backend, error) {
	return nil, errors.New("fsnotify not supported on the current platform")
}
func (w *other) Close() error { return nil }
func (w *other) WatchList() []string { return nil }
func (w *other) Add(name string) error { return nil }
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
func (w *other) Remove(name string) error { return nil }
func (w *other) xSupports(op Op) bool { return false }
vendor/github.com/fsnotify/fsnotify/backend_windows.go (generated, vendored): 680 lines removed
@@ -1,680 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
// Windows backend based on ReadDirectoryChangesW()
|
||||
//
|
||||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type readDirChangesW struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
done chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, closed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
closed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
var defaultBufferSize = 50
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
w := &readDirChangesW{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
done: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) isClosed() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.closed
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
event := w.newEvent(name, uint32(mask))
|
||||
event.renamedFrom = renamedFrom
|
||||
select {
|
||||
case ch := <-w.done:
|
||||
w.done <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *readDirChangesW) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.closed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "done" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.done <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
if with.bufsize < 4096 {
|
||||
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
bufsize: with.bufsize,
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
for name := range watchEntry.names {
|
||||
entries = append(entries, filepath.Join(watchEntry.path, name))
|
||||
}
|
||||
// the directory itself is being watched
|
||||
if watchEntry.mask != 0 {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// These options are from the old golang.org/x/exp/winfsnotify, where you could
|
||||
// add various options to the watch. This has long since been removed.
|
||||
//
|
||||
// The "sys" in the name is misleading as they're not part of any "system".
|
||||
//
|
||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||
const (
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
sysFSIGNORED = 0x8000
|
||||
)
|
||||
|
||||
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
bufsize int
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle windows.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
recurse bool // Recursive watch?
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf []byte // buffer, allocated later
|
||||
}
|
||||
|
||||
type (
|
||||
indexMap map[uint64]*watch
|
||||
watchMap map[uint32]indexMap
|
||||
)
|
||||
|
||||
func (w *readDirChangesW) wakeupReader() error {
|
||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
|
||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||
if err != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||
}
|
||||
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
|
||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
||||
windows.FILE_LIST_DIRECTORY,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
||||
nil, windows.OPEN_EXISTING,
|
||||
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", err)
|
||||
}
|
||||
|
||||
var fi windows.ByHandleFileInformation
|
||||
err = windows.GetFileInformationByHandle(h, &fi)
|
||||
if err != nil {
|
||||
windows.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
|
||||
if err != nil {
|
||||
windows.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
recurse: recurse,
|
||||
buf: make([]byte, bufsize),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
windows.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
|
||||
err = w.startRead(watchEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) remWatch(pathname string) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
|
||||
if recurse && !watch.recurse {
|
||||
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||
}
|
||||
|
||||
err = windows.CloseHandle(ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
if watch == nil {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) startRead(watch *watch) error {
|
||||
err := windows.CancelIo(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CancelIo", err))
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := w.toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= w.toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
err := windows.CloseHandle(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// We need to pass the array, rather than the slice.
|
||||
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
|
||||
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
|
||||
watch.recurse, mask, nil, &watch.ov, 0)
|
||||
if rdErr != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *readDirChangesW) readEvents() {
|
||||
var (
|
||||
n uint32
|
||||
key uintptr
|
||||
ov *windows.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
// This error is handled after the watch == nil check below.
|
||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.done:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
|
||||
err := windows.CloseHandle(w.port)
|
||||
if err != nil {
|
||||
err = os.NewSyscallError("CloseHandle", err)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch qErr {
|
||||
case nil:
|
||||
// No error
|
||||
case windows.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case windows.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case windows.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.sendError(ErrEventOverflow)
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
|
||||
// Create a buf that is the size of the path name
|
||||
size := int(raw.FileNameLength / 2)
|
||||
var buf []uint16
|
||||
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
|
||||
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
|
||||
sh.Len = size
|
||||
sh.Cap = size
|
||||
name := windows.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
if debug {
|
||||
internal.Debug(fullname, raw.Action)
|
||||
}
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
// Update saved path of all sub-watches.
|
||||
old := filepath.Join(watch.path, watch.rename)
|
||||
w.mu.Lock()
|
||||
for _, watchMap := range w.watches {
|
||||
for _, ww := range watchMap {
|
||||
if strings.HasPrefix(ww.path, old) {
|
||||
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, "", watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action == windows.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
} else {
|
||||
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
}
|
||||
|
||||
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
//lint:ignore ST1005 Windows should be capitalized
|
||||
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.sendError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case windows.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/fsnotify.go (generated, vendored): 496 lines removed
@@ -1,496 +0,0 @@
|
||||
// Package fsnotify provides a cross-platform interface for file system
|
||||
// notifications.
|
||||
//
|
||||
// Currently supported systems:
|
||||
//
|
||||
// - Linux via inotify
|
||||
// - BSD, macOS via kqueue
|
||||
// - Windows via ReadDirectoryChangesW
|
||||
// - illumos via FEN
|
||||
//
|
||||
// # FSNOTIFY_DEBUG
|
||||
//
|
||||
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
|
||||
// stderr. This can be useful to track down some problems, especially in cases
|
||||
// where fsnotify is used as an indirect dependency.
|
||||
//
|
||||
// Every event will be printed as soon as there's something useful to print,
|
||||
// with as little processing from fsnotify.
|
||||
//
|
||||
// Example output:
|
||||
//
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
|
||||
// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
b backend
|
||||
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
}
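The comment block above describes what arrives on Events and Errors. A minimal consumer sketch using the public API; the watched path is a placeholder and error handling is trimmed:

// Minimal consumer loop for the Events and Errors channels documented above.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/watched"); err != nil { // placeholder path
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // channel closed by Close()
			}
			if ev.Has(fsnotify.Write) {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}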
|
||||
|
||||
// Event represents a file system notification.
|
||||
type Event struct {
|
||||
// Path to the file or directory.
|
||||
//
|
||||
// Paths are relative to the input; for example with Add("dir") the Name
|
||||
// will be set to "dir/file" if you create that file, but if you use
|
||||
// Add("/path/to/dir") it will be "/path/to/dir/file".
|
||||
Name string
|
||||
|
||||
// File operation that triggered the event.
|
||||
//
|
||||
// This is a bitmask and some systems may send multiple operations at once.
|
||||
// Use the Event.Has() method instead of comparing with ==.
|
||||
Op Op
|
||||
|
||||
// Create events will have this set to the old path if it's a rename. This
|
||||
// only works when both the source and destination are watched. It's not
|
||||
// reliable when watching individual files, only directories.
|
||||
//
|
||||
// For example "mv /tmp/file /tmp/rename" will emit:
|
||||
//
|
||||
// Event{Op: Rename, Name: "/tmp/file"}
|
||||
// Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
|
||||
renamedFrom string
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||
// full description, and check them with [Event.Has].
|
||||
const (
|
||||
// A new pathname was created.
|
||||
Create Op = 1 << iota
|
||||
|
||||
// The pathname was written to; this does *not* mean the write has finished,
|
||||
// and a write can be followed by more writes.
|
||||
Write
|
||||
|
||||
// The path was removed; any watches on it will be removed. Some "remove"
|
||||
// operations may trigger a Rename if the file is actually moved (for
|
||||
// example "remove to trash" is often a rename).
|
||||
Remove
|
||||
|
||||
// The path was renamed to something else; any watches on it will be
|
||||
// removed.
|
||||
Rename
|
||||
|
||||
// File attributes were changed.
|
||||
//
|
||||
// It's generally not recommended to take action on this event, as it may
|
||||
// get triggered very frequently by some software. For example, Spotlight
|
||||
// indexing on macOS, anti-virus software, backup software, etc.
|
||||
Chmod
|
||||
|
||||
// File descriptor was opened.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableOpen
|
||||
|
||||
// File was read from.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableRead
|
||||
|
||||
// File opened for writing was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
//
|
||||
// The advantage of using this over Write is that it's more reliable than
|
||||
// waiting for Write events to stop. It's also faster (if you're not
|
||||
// listening to Write events): copying a file of a few GB can easily
|
||||
// generate tens of thousands of Write events in a short span of time.
|
||||
xUnportableCloseWrite
|
||||
|
||||
// File opened for reading was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableCloseRead
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNonExistentWatch is used when Remove() is called on a path that's not
|
||||
// added.
|
||||
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
|
||||
|
||||
// ErrClosed is used when trying to operate on a closed Watcher.
|
||||
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||
|
||||
// ErrEventOverflow is reported from the Errors channel when there are too
|
||||
// many events:
|
||||
//
|
||||
// - inotify: inotify returns IN_Q_OVERFLOW – because there are too
|
||||
// many queued events (the fs.inotify.max_queued_events
|
||||
// sysctl can be used to increase this).
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||
|
||||
// ErrUnsupported is returned by AddWith() when WithOps() specified an
|
||||
// Unportable event that's not supported on this platform.
|
||||
//lint:ignore ST1012 not relevant
|
||||
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
|
||||
)
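ErrEventOverflow above signals dropped events rather than a fatal failure. A hedged sketch of handling it from the Errors channel; the rescan callback stands in for application-specific recovery logic:

// Sketch of reacting to ErrEventOverflow; rescan is a placeholder callback.
package watchapp

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

// drainErrors logs watcher errors and falls back to a full rescan of the
// watched paths whenever events were dropped due to overflow.
func drainErrors(w *fsnotify.Watcher, rescan func()) {
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			rescan()
			continue
		}
		log.Println("fsnotify:", err)
	}
}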
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
|
||||
b, err := newBackend(ev, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
ev, errs := make(chan Event, sz), make(chan error)
|
||||
b, err := newBackend(ev, errs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Watcher{b: b, Events: ev, Errors: errs}, nil
|
||||
}
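A trivial usage sketch for NewBufferedWatcher; 4096 is an arbitrary capacity, and as the comment above notes, the unbuffered NewWatcher is usually the better choice:

// Sketch of creating a buffered watcher; capacity 4096 is illustrative only.
package watchapp

import "github.com/fsnotify/fsnotify"

func newBuffered() (*fsnotify.Watcher, error) {
	return fsnotify.NewBufferedWatcher(4096)
}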
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
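The Add documentation above recommends watching the parent directory instead of a single file, because editors replace files atomically. A sketch of that pattern; the package name and helper are illustrative:

// Sketch of watching a single file via its parent directory, as advised above.
package watchapp

import (
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchOneFile watches the directory containing file and invokes onChange
// only for events whose cleaned path matches the file we care about.
func watchOneFile(w *fsnotify.Watcher, file string, onChange func(fsnotify.Event)) error {
	if err := w.Add(filepath.Dir(file)); err != nil {
		return err
	}
	go func() {
		for ev := range w.Events {
			if filepath.Clean(ev.Name) == filepath.Clean(file) {
				onChange(ev)
			}
		}
	}()
	return nil
}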
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error { return w.b.Close() }
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// The order is undefined, and may differ per call. Returns nil if
|
||||
// [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
|
||||
|
||||
// Supports reports if all the listed operations are supported by this platform.
|
||||
//
|
||||
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
|
||||
// return false for an Op starting with Unportable.
|
||||
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
|
||||
|
||||
func (o Op) String() string {
|
||||
var b strings.Builder
|
||||
if o.Has(Create) {
|
||||
b.WriteString("|CREATE")
|
||||
}
|
||||
if o.Has(Remove) {
|
||||
b.WriteString("|REMOVE")
|
||||
}
|
||||
if o.Has(Write) {
|
||||
b.WriteString("|WRITE")
|
||||
}
|
||||
if o.Has(xUnportableOpen) {
|
||||
b.WriteString("|OPEN")
|
||||
}
|
||||
if o.Has(xUnportableRead) {
|
||||
b.WriteString("|READ")
|
||||
}
|
||||
if o.Has(xUnportableCloseWrite) {
|
||||
b.WriteString("|CLOSE_WRITE")
|
||||
}
|
||||
if o.Has(xUnportableCloseRead) {
|
||||
b.WriteString("|CLOSE_READ")
|
||||
}
|
||||
if o.Has(Rename) {
|
||||
b.WriteString("|RENAME")
|
||||
}
|
||||
if o.Has(Chmod) {
|
||||
b.WriteString("|CHMOD")
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
return "[no events]"
|
||||
}
|
||||
return b.String()[1:]
|
||||
}
|
||||
|
||||
// Has reports if this operation has the given operation.
|
||||
func (o Op) Has(h Op) bool { return o&h != 0 }
|
||||
|
||||
// Has reports if this event has the given operation.
|
||||
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
||||
|
||||
// String returns a string representation of the event and its path.
|
||||
func (e Event) String() string {
|
||||
if e.renamedFrom != "" {
|
||||
return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
|
||||
}
|
||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||
}
|
||||
|
||||
type (
|
||||
backend interface {
|
||||
Add(string) error
|
||||
AddWith(string, ...addOpt) error
|
||||
Remove(string) error
|
||||
WatchList() []string
|
||||
Close() error
|
||||
xSupports(Op) bool
|
||||
}
|
||||
addOpt func(opt *withOpts)
|
||||
withOpts struct {
|
||||
bufsize int
|
||||
op Op
|
||||
noFollow bool
|
||||
sendCreate bool
|
||||
}
|
||||
)
|
||||
|
||||
var debug = func() bool {
|
||||
// Check for exactly "1" (rather than mere existence) so we can add
|
||||
// options/flags in the future. I don't know if we ever want that, but it's
|
||||
// nice to leave the option open.
|
||||
return os.Getenv("FSNOTIFY_DEBUG") == "1"
|
||||
}()
|
||||
|
||||
var defaultOpts = withOpts{
|
||||
bufsize: 65536, // 64K
|
||||
op: Create | Write | Remove | Rename | Chmod,
|
||||
}
|
||||
|
||||
func getOptions(opts ...addOpt) withOpts {
|
||||
with := defaultOpts
|
||||
for _, o := range opts {
|
||||
if o != nil {
|
||||
o(&with)
|
||||
}
|
||||
}
|
||||
return with
|
||||
}
|
||||
|
||||
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
|
||||
//
|
||||
// This only has effect on Windows systems, and is a no-op for other backends.
|
||||
//
|
||||
// The default value is 64K (65536 bytes) which is the highest value that works
|
||||
// on all filesystems and should be enough for most applications, but if you
|
||||
// have a large burst of events it may not be enough. You can increase it if
|
||||
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
|
||||
//
|
||||
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
func WithBufferSize(bytes int) addOpt {
|
||||
return func(opt *withOpts) { opt.bufsize = bytes }
|
||||
}
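A usage sketch for WithBufferSize with AddWith; the option only affects the Windows backend, and the path and 256 KiB size are illustrative:

// Sketch of raising the ReadDirectoryChangesW buffer for one watch.
package watchapp

import "github.com/fsnotify/fsnotify"

func addWithBigBuffer(w *fsnotify.Watcher) error {
	return w.AddWith(`C:\projects\data`, fsnotify.WithBufferSize(256*1024))
}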
|
||||
|
||||
// WithOps sets which operations to listen for. The default is [Create],
|
||||
// [Write], [Remove], [Rename], and [Chmod].
|
||||
//
|
||||
// Excluding operations you're not interested in can save quite a bit of CPU
|
||||
// time; in some use cases there may be hundreds of thousands of useless Write
|
||||
// or Chmod operations per second.
|
||||
//
|
||||
// This can also be used to add unportable operations not supported by all
|
||||
// platforms; unportable operations all start with "Unportable":
|
||||
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
|
||||
// [UnportableCloseRead].
|
||||
//
|
||||
// AddWith returns an error when using an unportable operation that's not
|
||||
// supported. Use [Watcher.Support] to check for support.
|
||||
func withOps(op Op) addOpt {
|
||||
return func(opt *withOpts) { opt.op = op }
|
||||
}
|
||||
|
||||
// WithNoFollow disables following symlinks, so the symlinks themselves are
|
||||
// watched.
|
||||
func withNoFollow() addOpt {
|
||||
return func(opt *withOpts) { opt.noFollow = true }
|
||||
}
|
||||
|
||||
// "Internal" option for recursive watches on inotify.
|
||||
func withCreate() addOpt {
|
||||
return func(opt *withOpts) { opt.sendCreate = true }
|
||||
}
|
||||
|
||||
var enableRecurse = false
|
||||
|
||||
// Check if this path is recursive (ends with "/..." or "\..."), and return the
|
||||
// path with the /... stripped.
|
||||
func recursivePath(path string) (string, bool) {
|
||||
path = filepath.Clean(path)
|
||||
if !enableRecurse { // Only enabled in tests for now.
|
||||
return path, false
|
||||
}
|
||||
if filepath.Base(path) == "..." {
|
||||
return filepath.Dir(path), true
|
||||
}
|
||||
return path, false
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/darwin.go (generated, vendored): 39 lines removed
@@ -1,39 +0,0 @@
//go:build darwin

package internal

import (
	"syscall"

	"golang.org/x/sys/unix"
)

var (
	ErrSyscallEACCES = syscall.EACCES
	ErrUnixEACCES = unix.EACCES
)

var maxfiles uint64

func SetRlimit() {
	// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
	var l syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
	if err == nil && l.Cur != l.Max {
		l.Cur = l.Max
		syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
	}
	maxfiles = l.Cur

	if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
		maxfiles = uint64(n)
	}

	if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
		maxfiles = uint64(n)
	}
}

func Maxfiles() uint64 { return maxfiles }
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go (generated, vendored): 57 lines removed
@@ -1,57 +0,0 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
|
||||
{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
|
||||
{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
|
||||
{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
|
||||
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
|
||||
{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
|
||||
{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
|
||||
{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
|
||||
{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
|
||||
{"NOTE_NONE", unix.NOTE_NONE},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_REAP", unix.NOTE_REAP},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
|
||||
{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
|
||||
{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
|
||||
{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go (generated, vendored): 33 lines removed
@@ -1,33 +0,0 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go (generated, vendored): 42 lines removed
@@ -1,42 +0,0 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CLOSE", unix.NOTE_CLOSE},
|
||||
{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OPEN", unix.NOTE_OPEN},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_READ", unix.NOTE_READ},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go (generated, vendored): 32 lines removed
@@ -1,32 +0,0 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, kevent *unix.Kevent_t) {
|
||||
mask := uint32(kevent.Fflags)
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||

56  vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go  generated  vendored
@@ -1,56 +0,0 @@
package internal

import (
	"fmt"
	"os"
	"strings"
	"time"

	"golang.org/x/sys/unix"
)

func Debug(name string, mask, cookie uint32) {
	names := []struct {
		n string
		m uint32
	}{
		{"IN_ACCESS", unix.IN_ACCESS},
		{"IN_ATTRIB", unix.IN_ATTRIB},
		{"IN_CLOSE", unix.IN_CLOSE},
		{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
		{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
		{"IN_CREATE", unix.IN_CREATE},
		{"IN_DELETE", unix.IN_DELETE},
		{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
		{"IN_IGNORED", unix.IN_IGNORED},
		{"IN_ISDIR", unix.IN_ISDIR},
		{"IN_MODIFY", unix.IN_MODIFY},
		{"IN_MOVE", unix.IN_MOVE},
		{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
		{"IN_MOVED_TO", unix.IN_MOVED_TO},
		{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
		{"IN_OPEN", unix.IN_OPEN},
		{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
		{"IN_UNMOUNT", unix.IN_UNMOUNT},
	}

	var (
		l       []string
		unknown = mask
	)
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown))
	}
	var c string
	if cookie > 0 {
		c = fmt.Sprintf("(cookie: %d) ", cookie)
	}
	fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
		time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
}

25  vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go  generated  vendored
@@ -1,25 +0,0 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}

28  vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go  generated  vendored
@@ -1,28 +0,0 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EOF", unix.NOTE_EOF},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}

45  vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go  generated  vendored
@@ -1,45 +0,0 @@
package internal

import (
	"fmt"
	"os"
	"strings"
	"time"

	"golang.org/x/sys/unix"
)

func Debug(name string, mask int32) {
	names := []struct {
		n string
		m int32
	}{
		{"FILE_ACCESS", unix.FILE_ACCESS},
		{"FILE_MODIFIED", unix.FILE_MODIFIED},
		{"FILE_ATTRIB", unix.FILE_ATTRIB},
		{"FILE_TRUNC", unix.FILE_TRUNC},
		{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
		{"FILE_DELETE", unix.FILE_DELETE},
		{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
		{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
		{"UNMOUNTED", unix.UNMOUNTED},
		{"MOUNTEDOVER", unix.MOUNTEDOVER},
		{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
	}

	var (
		l       []string
		unknown = mask
	)
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown))
	}
	fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
		time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
}

Some files were not shown because too many files have changed in this diff.