Compare commits

157 Commits

Author SHA1 Message Date
d4413c433f fmt 2025-11-02 11:04:12 +02:00
d9a4bb7871 fix fmt 2025-11-02 10:17:49 +02:00
8e017af3ed change sv1 version 2025-11-01 10:22:14 +02:00
ef6023330d 2025-10-22 15:05:12 +03:00
5474b22fc8 Update README.md 2025-10-12 13:11:55 +03:00
6cd678d9f1 Update README.md 2025-10-12 13:10:10 +03:00
856d3b418c Update README.md 2025-10-12 13:06:37 +03:00
5734ca7a67 update readme.md 2025-10-12 12:56:30 +03:00
608c5aed4a Update README.md 2025-10-12 12:46:07 +03:00
Aleksey d4d04115f3 Update README.md 2025-10-12 11:10:21 +03:00
Aleksey 4b916f4fc9 Update README.md 2025-10-12 10:46:41 +03:00
54cc496c39 fix noname node message 2025-10-12 01:57:52 +03:00
f7b0014a37 fix bug 2025-10-12 01:56:11 +03:00
54eb5eec6a add wiki to gitignore 2025-10-11 23:20:14 +03:00
6cc24a1e7f update readme 2025-10-11 21:07:51 +03:00
ea41c435dd update readme 2025-10-11 21:04:59 +03:00
d24e1a94ae 2025-10-11 21:01:17 +03:00
846dc06601 update makefile 2025-10-11 16:31:35 +03:00
740fbbff78 fix version message 2025-10-11 16:25:17 +03:00
40be3c8d09 add sv1 version 2025-10-11 16:24:58 +03:00
9c140abc6d make a table 2025-10-11 12:49:53 +03:00
e90233aec4 complete part of readme 2025-10-11 12:44:48 +03:00
df1ef57769 fix bugs 2025-10-10 22:58:31 +03:00
4c840c40bb some changes 2025-10-10 22:54:29 +03:00
57f35e8f33 move go files to src/ 2025-10-10 22:46:24 +03:00
f0c591f325 delete old files 2025-10-10 22:28:55 +03:00
36ee320c45 comment upx 2025-10-10 22:22:34 +03:00
ee6fd205d5 fix the use of empty fields in the response 2025-10-10 22:22:15 +03:00
bed0471cc4 remove some messages 2025-10-10 22:03:21 +03:00
e3812a18a6 require bcrypt only if needed 2025-10-10 20:35:19 +03:00
b7d939d5d7 optimise db exec 2025-10-10 20:23:24 +03:00
c737e80b8f add password changing support 2025-10-10 20:02:17 +03:00
5783a756c3 add method update 2025-10-10 19:27:04 +03:00
ba47ee4219 move error messages to variables 2025-10-10 19:26:57 +03:00
5d49e0afc7 add _errors.lua 2025-10-10 19:26:35 +03:00
76fed578ff create fully functional get method 2025-10-09 20:26:25 +03:00
975c52b58e delete unused modules 2025-10-09 20:03:44 +03:00
4e75d48f1d small fixes 2025-10-09 20:00:48 +03:00
65af07fffa add delete method 2025-10-09 20:00:40 +03:00
1252634420 Change method separator to . and move separator symbol and regexp template to global variables 2025-10-09 19:56:41 +03:00
4a58845211 delete some files 2025-10-05 19:10:53 +03:00
b0701632e6 add common function to Unit layer 2025-10-05 19:10:45 +03:00
9277aa9f1a add some files to .gitignore 2025-10-05 19:10:31 +03:00
19654e1eca Add some CRUD methods to manage units table 2025-10-05 19:09:58 +03:00
d4306a0d89 rename internal.sha256.sum to hash 2025-10-05 19:09:00 +03:00
73095a69e0 Merge branch 'main' of https://github.com/akyaiy/GoSally-mvp 2025-09-12 19:19:08 +03:00
0f82ce941b in 2025-09-12 19:18:22 +03:00
Aleksey 0ec8493ab4 Merge pull request #3 from akyaiy/auth-server: Auth server 2025-09-12 19:16:32 +03:00
625e5daf71 commit to merge branches 2025-09-12 19:13:46 +03:00
cc27843bb3 add GetSoneInfo (not functional) 2025-08-20 10:12:44 +03:00
20fec82159 delete old scripts 2025-08-20 10:12:15 +03:00
055b299ecb some changes with scripts and add new 2025-08-20 10:12:02 +03:00
17bf207087 fix little bug with script ending 2025-08-20 10:11:36 +03:00
7ae8e12dc8 add new method 2025-08-20 10:11:14 +03:00
6e36db428a fmt 2025-08-20 10:11:02 +03:00
06103a3264 add lua engine skeleton 2025-08-20 10:10:52 +03:00
c6da55ad65 some changes with data field, and fix smth 2025-08-10 16:26:41 +03:00
20a1e3e7bb fix the List.lua 2025-08-10 16:26:21 +03:00
e594d519a7 add set and set_error methods and fix some bugs 2025-08-10 09:49:35 +03:00
2ceb236a53 some small changes, and add send, send_error, throw_error and some field 2025-08-09 10:41:50 +03:00
811403a0a2 echo test function 2025-08-09 10:41:13 +03:00
b451f2d3fc fix jwt dep 2025-08-09 10:41:01 +03:00
5c01eaad6f rename field __gosally_internal to __seed 2025-08-07 19:58:42 +03:00
2b38e179db remove a mistake from specification 2025-08-07 15:46:27 +03:00
2889092821 fix some bugs with params and add params type check 2025-08-07 15:43:49 +03:00
3df3a7b4b5 remove default case because it's not allowed 2025-08-06 22:24:13 +03:00
c63f1bd123 remove default case because it's not allowed 2025-08-06 22:23:58 +03:00
095b8559f4 fix bug with params's array.. again 2025-08-06 19:48:02 +03:00
39532f22ea fix bug with result array 2025-08-06 19:36:08 +03:00
35cebee819 fix bug with empty result and non table result 2025-08-06 19:17:10 +03:00
84dfdd6b35 add sha256 module 2025-08-06 16:37:28 +03:00
e693efe8e7 add iat to jwt 2025-08-06 16:37:13 +03:00
c3dcf24e50 improve jwt 2025-08-06 16:36:55 +03:00
9e7d99e854 fmt 2025-08-06 16:36:39 +03:00
7f2783b39a add postfix -md5 to checksum field 2025-08-06 15:45:20 +03:00
c08135309f add random salt and result/error checksum 2025-08-06 15:41:25 +03:00
cd9e3ab6c4 remove phone_number from db query 2025-08-06 15:40:42 +03:00
adaedf195f solve a little problem with array's fetching 2025-08-06 14:35:17 +03:00
87694f6654 make ConvertGolangTypesToLua simpler 2025-08-06 14:01:45 +03:00
fe628e0f7f develop jwt auth for methods 2025-08-06 14:01:27 +03:00
3898e2833b fmt 2025-08-06 11:32:04 +03:00
e4db8505a0 add sid 2025-08-06 11:31:42 +03:00
0c25d00171 add github.com/golang-jwt/jwt/v5 to the project 2025-08-06 11:31:33 +03:00
b5a6de0b62 add jwt support 2025-08-06 11:31:14 +03:00
1d3d74846e rename database-sqlite to database.sqlite 2025-08-06 10:42:26 +03:00
0141427bfe add print to not allowed functions 2025-08-06 10:04:22 +03:00
866946646b delete some io.* writing functions 2025-08-06 09:58:40 +03:00
251e580e8a add headers lua runtime support 2025-08-05 23:15:13 +03:00
c734779b69 rename lL to L 2025-08-05 22:11:29 +03:00
0923f32b46 make a get function on fetch params table fields 2025-08-05 22:10:15 +03:00
1c2c4c1356 some small changes for auth scripts 2025-08-05 22:09:55 +03:00
d3eb483461 add com/_config.lua to .gitignore 2025-08-05 22:09:33 +03:00
5b32698ec5 some scripts changes 2025-08-05 18:37:58 +03:00
0ed734b2b1 fix sqlite import message 2025-08-04 16:49:33 +03:00
396352ba15 remove vendor 2025-08-04 16:38:05 +03:00
7b9bdcf768 add bcrypt module to lua 2025-08-04 16:37:54 +03:00
47058f0ddd move lua db to external file 2025-08-04 16:37:43 +03:00
Aleksey 24eef9eee0 Merge pull request #2 from akyaiy/dev: Dev 2025-08-04 15:15:00 +03:00
Aleksey a6c9e5102f Merge branch 'main' into dev 2025-08-04 15:13:55 +03:00
a72627d87c delete table creation 2025-08-04 13:38:27 +03:00
4a9719cdfb sqlite usage example 2025-08-04 13:36:38 +03:00
7de5ec5248 add sqlite support 2025-08-04 13:36:30 +03:00
e5f9105364 change cors 2025-08-04 13:36:21 +03:00
ce2a23f9e6 make internal modules in "internal" 2025-08-02 16:22:05 +03:00
d56b022bf5 add com/test.lua to .gitignore 2025-08-02 16:13:22 +03:00
ca38c10ec4 made creation of private field before pushing 2025-08-02 16:12:44 +03:00
13dbd00bb7 update config example 2025-08-02 16:12:08 +03:00
e7289dc9be update post example 2025-08-02 11:50:19 +03:00
5394178abc update lua logging 2025-08-02 11:50:09 +03:00
981551e944 change default log output to stderr 2025-08-02 10:14:35 +03:00
27446adf3f add debug information to lua_handler and route 2025-08-02 10:04:00 +03:00
2f071c25b2 update annotations 2025-08-02 01:02:59 +03:00
d23fd32e84 update annotations 2025-08-02 01:01:05 +03:00
86d35a9ede update annotations 2025-08-02 00:55:59 +03:00
c77d51a95c some changes with config.log.output 2025-08-02 00:53:19 +03:00
3cbea14e84 fmt 2025-08-02 00:00:43 +03:00
6e59af1662 add json_format option for structure logging 2025-08-02 00:00:35 +03:00
8684d178e0 update Get example 2025-08-01 23:54:44 +03:00
945ab6c9cf update annotations 2025-08-01 23:53:40 +03:00
520901c331 update script error handling 2025-08-01 23:53:30 +03:00
9a274250cd update annotations 2025-08-01 23:25:46 +03:00
6d49d83ea7 update List.lua 2025-08-01 23:25:36 +03:00
fb04b3bc46 change in/out to request/response 2025-08-01 23:25:24 +03:00
a60b75a4c0 make lua deps modular 2025-08-01 23:18:45 +03:00
041fda8522 change ErrSessionIsTaken to EssSessionIsBusy 2025-08-01 20:07:15 +03:00
6508f03d08 fmt 2025-08-01 13:12:57 +03:00
93cf53025c rename init hooks names 2025-08-01 13:12:27 +03:00
83912b6c28 add context TODO 2025-08-01 12:56:09 +03:00
6ed5a7f9e0 add context to Handle method 2025-08-01 12:55:58 +03:00
2f78e9367c update logger initialization 2025-08-01 12:55:35 +03:00
ac074ce0ff change mapstructure to output 2025-08-01 12:55:17 +03:00
8bdf9197d6 set default config value to stdout 2025-08-01 12:54:58 +03:00
4db8fa2360 change fallback os exit to 0 2025-08-01 12:54:41 +03:00
2a48927a08 update hooks 2025-08-01 12:54:28 +03:00
58027bb988 add config processing 2025-08-01 12:54:18 +03:00
30a87fdb4c update List.lua 2025-08-01 12:53:25 +03:00
5cdfb2a543 config confirmation 2025-08-01 00:56:05 +03:00
08e96aa32a add optional config preview with confirmation 2025-08-01 00:08:38 +03:00
3b8390a0c8 add a minimal session manager that stores UUID and TTL sessions to eliminate recursive queries 2025-07-31 23:29:30 +03:00
b6ad0f82a0 add Post example 2025-07-31 21:47:09 +03:00
7009828e79 swap stages colors 2025-07-31 21:32:35 +03:00
45e541ac00 update Config structure and add node name parameter 2025-07-31 21:30:59 +03:00
a5a7354061 add a small reminder 2025-07-31 20:56:25 +03:00
20bb90e77a oh again update Get example 2025-07-31 20:51:30 +03:00
148ca53538 update Net.Http.Get example 2025-07-31 20:46:02 +03:00
2951fd2da9 add Net.Http.Get usage example 2025-07-31 20:39:44 +03:00
f411637520 add Net.Http to lua 2025-07-31 20:37:31 +03:00
75ee6e10aa change http server log level to LevelError 2025-07-31 19:48:06 +03:00
cfa7724b68 add exception for unknown logging level 2025-07-30 20:16:49 +03:00
f44e89b0de add new initialization hook to check configuration 2025-07-30 19:19:27 +03:00
23ed707029 make fields in configuration structures pointers, fix errors in code related to this change 2025-07-30 19:19:02 +03:00
299fd59e19 move %tmp% dereferencing to stage 6 of initialization, using a simpler and more efficient way of checking for contents 2025-07-30 19:17:36 +03:00
b601962354 add TO DO to config/consts.go 2025-07-30 18:43:07 +03:00
38f784b850 remove unused server api settings 2025-07-30 18:41:50 +03:00
6d2bf5cdd2 add "about" to scripts
you can get a method's description by sending a request whose params include an `about` field with any value
2025-07-30 18:32:38 +03:00
f3c4b9e9b1 update config example 2025-07-30 12:02:12 +03:00
Aleksey 81359c036c Merge pull request #1 from akyaiy/dev: Dev 2025-07-29 16:51:18 +03:00
2186 changed files with 3106 additions and 6678457 deletions

11
.gitignore vendored
View File

@@ -5,6 +5,15 @@ tmp/
.meta/
db/
com/test.lua
com/_config.lua
.vscode
Taskfile.yml
config.yaml
config.yaml
wiki
# Garbage
com/_*
com/test.lua

Makefile
View File

@@ -4,7 +4,10 @@ GOPATH := $(shell go env GOPATH)
export CONFIG_PATH := ./config.yaml
export NODE_PATH := $(shell pwd)
LDFLAGS := -X 'github.com/akyaiy/GoSally-mvp/internal/engine/config.NodeVersion=v0.0.1-dev'
NODE_VERSION := v0.0.1-dev
SV1_VERSION := v0.0.1-dev
LDFLAGS := -X 'github.com/akyaiy/GoSally-mvp/src/internal/engine/config.NodeVersion=$(NODE_VERSION)' -X 'github.com/akyaiy/GoSally-mvp/src/internal/server/sv1.SV1Version=$(SV1_VERSION)'
CGO_CFLAGS := -I/usr/local/include
CGO_LDFLAGS := -L/usr/local/lib -llua5.1 -lm -ldl
.PHONY: all build run runq test fmt vet lint check clean
@@ -29,15 +32,15 @@ build:
@echo "Building..."
@# @echo "CGO_CFLAGS is: '$(CGO_CFLAGS)'"
@# @echo "CGO_LDFLAGS is: '$(CGO_LDFLAGS)'"
@# CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)"
@go build -trimpath -ldflags "-w -s $(LDFLAGS)" -o $(BIN_DIR)/$(APP_NAME) ./
@if ! command -v upx >/dev/null 2>&1; then \
echo "upx not found, skipping compression."; \
elif upx -t $(BIN_DIR)/$(APP_NAME) >/dev/null 2>&1; then \
echo "$(BIN_DIR)/$(APP_NAME) already compressed, skipping."; \
else \
upx $(BIN_DIR)/$(APP_NAME) >/dev/null 2>&1 || true; \
fi
@# CGO_CFLAGS="$(CGO_CFLAGS)" CGO_LDFLAGS="$(CGO_LDFLAGS)"
cd src && go build -trimpath -ldflags "-w -s $(LDFLAGS)" -o ../$(BIN_DIR)/$(APP_NAME) ./
# @if ! command -v upx >/dev/null 2>&1; then \
# echo "upx not found, skipping compression."; \
# elif upx -t $(BIN_DIR)/$(APP_NAME) >/dev/null 2>&1; then \
# echo "$(BIN_DIR)/$(APP_NAME) already compressed, skipping."; \
# else \
# upx $(BIN_DIR)/$(APP_NAME) >/dev/null 2>&1 || true; \
# fi
run: build
@echo "Running!"
@@ -52,30 +55,22 @@ pure-run:
exec ./$(BIN_DIR)/$(APP_NAME)
test:
@go test ./... | grep -v '^?' || true
@cd src && go test ./... | grep -v '^?' || true
fmt:
@go fmt ./internal/./...
@go fmt ./cmd/./...
@go fmt ./hooks/./...
@$(GOPATH)/bin/goimports -w ./internal/
@$(GOPATH)/bin/goimports -w ./cmd/
@$(GOPATH)/bin/goimports -w ./hooks/
@cd src && go fmt .
@cd src && $(GOPATH)/bin/goimports -w .
vet:
@go vet ./...
lint:
@$(GOPATH)/bin/golangci-lint run
@cd src && go vet ./...
check: fmt vet lint test
licenses:
lint:
@cd src && $(GOPATH)/bin/golangci-lint run ./...
@$(GOPATH)/bin/go-licenses save ./... --save_path=third_party/licenses --force
@echo "Licenses have been exported to third_party/licenses"
clean:
@rm -rf bin
licenses:
@cd src && $(GOPATH)/bin/go-licenses save ./... --save_path=../third_party/licenses --force
@echo "Licenses have been exported to third_party/licenses"
help:
@echo "Available commands: $$(cat Makefile | grep -E '^[a-zA-Z_-]+:.*?' | grep -v -- '-setup:' | sed 's/:.*//g' | sort | uniq | tr '\n' ' ')"

147
README.md
View File

@@ -1,51 +1,132 @@
# Go Sally MVP (Minimum/Minimal Viable Product)
[![Status](https://img.shields.io/badge/status-MVP-orange.svg)]()
[![Go Version](https://img.shields.io/badge/Go-1.24.6-informational)](https://go.dev/)
[![Lua Version](https://img.shields.io/badge/Lua-5.1-informational)](https://www.lua.org/manual/5.1/)
[![Go Reference](https://pkg.go.dev/badge/github.com/akyaiy/GoSally-mvp.svg)](https://pkg.go.dev/github.com/akyaiy/GoSally-mvp)
[![License](https://img.shields.io/badge/license-BSD--3--Clause-blue)](LICENSE)
### What is this?
A system that lets you build your own infrastructure from identical nodes and a variety of scripts: built-in Lua 5.1 scripts, shebang scripts (scripts starting with the `#!` characters), and compiled binaries.
[![Last Commit](https://img.shields.io/github/last-commit/akyaiy/GoSally-mvp)]()
[![Commits per month](https://img.shields.io/github/commit-activity/m/akyaiy/GoSally-mvp)]()
[![Docs](https://img.shields.io/badge/docs-wiki-blue)](https://github.com/akyaiy/GoSally-mvp/wiki)
### Features
Go Sally is not viable at the moment, but it can already run embedded scripts, log slog events to stdout, handle RPC-like requests, and update itself automatically from the repository (my pride, to be honest).
### Example of use
The basic directory tree looks something like this
```
.
├── bin
│   └── node Node core binary file
├── com
│   ├── echo.lua
│   ├── _globals.lua Declaring global variables and functions for all internal scripts (also required for luarc to work correctly)
│   └── _prepare.lua Script that is executed before each script launch
└── config.yaml
> ⚡ **What, Why, Why Care?**
3 directories, 5 files
> **What:** Go Sally is a lightweight decentralized node system with Lua scripting and JSON-RPC2.0.
```
Launch by command
> **Why:** Large admin tools are too heavy, and Raspberry Pi and small servers require a lightweight, modular architecture.
> **Why Care:** Create, automate, and expand your infrastructure quickly, without unnecessary software or dependencies.
## Navigation
* [Core features](#core-features)
* [Quick start](#quick-start)
* [Test it](#test-it)
* [Concept](#concept)
* [API](#api)
* [License](#license)
* [Wiki →](https://github.com/akyaiy/GoSally-mvp/wiki)
> [!NOTE]
> If you see "💡" in the text, it means the information below is about plans for the future of the project.
## Core features
- **Decentralized nodes**<details>this means that *multiple GS[^1] nodes can be located on a single machine*, provided no attempt is made to disrupt, sabotage, or bypass the built-in protection mechanism against running a node under the same identifier as one already running in the system. Identification plays a role in node communication. 💡 In the future, we plan to create tools for conveniently building distributed systems using node identification.
**Why Care?** Multiple nodes on one machine allow testing, experimentation, and scaling small infrastructures without extra hardware or complex setup.</details>
- **RPC request processing**<details>the GS node operates *over HTTP/HTTPS using the JSON-RPC 2.0 protocol.* Unlike gRPC, JSON-RPC is extremely simple, makes it easy to send requests straight from a browser, and requires no additional code generation or compilation. **Why Care?** An easy-to-use API means you can control nodes from anywhere, including lightweight web clients, without compiling extra code. (A node-to-node call sketch follows this feature list.)</details>
- **Lua script-based methods**<details>*The gopher-lua library is used, providing full support for Lua 5.1.* Scripts get built-in libraries for interacting with sessions (receiving parameters and sending responses), hashing, logging, and more. This lets you write business logic on the fly without touching the lower layers of abstraction, and it removes both the extra compilation step and the risk of breaking the codebase.
Example of the "echo" method:
```lua
local session = require("internal.session")
-- import the internal library for interacting with sessions
session.response.send(session.request.params.get())
-- send everything passed in the parameters in response.
```
**Why Care?** You can extend node behavior dynamically, write custom logic fast, and iterate without recompiling — perfect for experiments or automation.
</details>
- **Relatively flexible configuration** <details>
you can configure the server port, address, name, node settings, and more. 💡 More settings are planned in the future. **Why Care?** Configure nodes for any environment, from Raspberry Pi to VPS, without touching the source code. obviously :)</details>
- ***And more in the future***
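Below is a minimal, illustrative sketch of node-to-node communication: one node's Lua method asks a peer node to describe its `List` method over JSON-RPC. It relies on the `net.http` and `session` interfaces declared in this repository's Lua annotations; the peer address, port, and error code are placeholder assumptions, not a guaranteed API.
```lua
-- Hypothetical com/CallPeer.lua: query a peer node for the List method's description.
-- Assumes the global net.http module (net.http.post(log, url, content_type, payload))
-- and the internal.session library; adjust the URL to a real peer node.
local session = require("internal.session")

local payload = '{"jsonrpc":"2.0","context-version":"v1","method":"List","params":{"about":true},"id":1}'
local res, err = net.http.post(false, "http://localhost:8080/com", "application/json", payload)
if err ~= nil then
    session.response.send_error(-32000, "peer request failed: " .. tostring(err))
    return
end

-- res.body carries the peer's raw JSON-RPC response
session.response.send({
    peer_status = res.status,
    peer_answer = res.body
})
```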
> [!IMPORTANT]
> This is the beginning of the project's development, and some aspects of it may be unstable, unfinished, and the text about it may be overly ambitious. It's just a matter of time.
## Quick start
```bash
git clone https://github.com/akyaiy/GoSally-mvp.git && \
cd GoSally-mvp && \
make build && \
echo -e "node:\n com_dir: \"%path%/com\"" > config.yaml && \
mkdir -p com && \
echo -e 'local session = require("internal.session")\n\nsession.response.send(session.request.params.get())' > com/echo.lua && \
./bin/node run
```
or, to pretty-print the structured logs
```bash
./bin/node run | jq
```
Example of GET request to server
If you have problems, make sure all [dependencies](https://github.com/akyaiy/GoSally-mvp/wiki/Getting-started#installing-dependencies) are installed; otherwise, [file an issue report](https://github.com/akyaiy/GoSally-mvp/issues).
### Test it
```bash
curl -s http://localhost:8080/api/v1/com/echo?msg=Hello
curl -X POST http://localhost:8080/com \
-d '{"jsonrpc":"2.0","context-version": "v1","method":"echo","params":["Hi!!"],"id":1}'
```
Then the response from the server
Expected response:
```json
{
"ResponsibleAgentUUID": "4593a87000bbe088f4e79c477e9c90d3",
"RequestedCommand": "echo",
"Response": {
"answer": "Hello",
"status": "ok"
"jsonrpc": "2.0",
"id": 1,
"result": [
"Hi!!"
],
"data": {
"responsible-node": "a0e1c440473ffd4d87e32cff2717f5b3",
"salt": "f26df732-a3be-4400-8e71-b8dc3ba705fc",
"checksum-md5": "cd8bec6a365d1b8ee90773567cb3ad0a"
}
}
```
### How to install
**You don't need it now, but you can figure it out with the Makefile**
## Concept
The project was originally conceived as a tool for building infrastructure using relatively *small nodes with limited functionality*. 💡 In the future, we plan to create a *web interface for interacting with nodes, administration, and configuration*. The concept is simple: suppose we have a node that manages Bind9. It has all the necessary methods for interacting with the service: creating new zones, viewing zone status, changing configuration, and server operation status. All of this works only through manual configuration, with the exception of larger solutions like Webmin and the BIND DNS Server module. The big problem is that while we only needed web configuration for Bind9, we have to pull in a massive amount of software just to implement one module. What if the service is hosted on a low-power Raspberry Pi? That's where GS nodes come in. By default, GS nodes communicate only through API calls, so 💡 in the future, we plan to create a dedicated, also programmable, web node that will provide convenient access to node management.
There's an obvious advantage here: transparency. The project is *completely open source and aims to support community-driven node functionality*. 💡 In the future, we plan to create a "store" similar to Docker Hub, which will contain scripts for configuring bind9, openvpn, and even custom projects.
## API
As mentioned earlier, *the server handles [JSON-RPC 2.0](https://www.jsonrpc.org/specification) requests*:
```json
{
"jsonrpc": "2.0",
"context-version": "v1",
"method": "test",
"params": [
"Hi!!"
],
"id": 1
}
```
This is a typical request using the JSON-RPC 2.0 protocol. A typical response looks like this:
```json
{
"jsonrpc": "2.0",
"id": 1,
"result": [
"Hi!!"
],
"data": {
"responsible-node": "2ad6ebeaf579a7c52801fb6c9dd1b83d",
"salt": "e7a81115-01c1-45b1-9618-0eae0ff26451",
"checksum-md5": "cd8bec6a365d1b8ee90773567cb3ad0a"
}
}
```
In the result field we see the method's response. Those familiar with the JSON-RPC 2.0 specification will notice that the `data` object is not part of the standard. It is my own extension and carries three fields:
| Field | Type | Description |
|--------|------|-------------|
| `responsible-node` | string | ID of the node that executed the task |
| `salt` | string | Random value for each request — can be used to check that the response is unique |
| `checksum-md5` | string | MD5 hash of the result field — can be used to avoid processing identical results separately |
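A client that fans the same request out to several nodes could use `checksum-md5` to collapse identical results. The sketch below is plain Lua on the client side (not part of the node) and only compares checksum strings, so no MD5 implementation is required; the field names follow the response shown above.
```lua
-- Deduplicate JSON-RPC responses by the checksum-md5 value in the data extension.
-- `resp` is assumed to be an already-decoded response table.
local seen = {} -- checksum-md5 -> true once processed

local function should_process(resp)
    local checksum = resp.data and resp.data["checksum-md5"]
    if checksum and seen[checksum] then
        return false -- an identical result was already handled
    end
    if checksum then
        seen[checksum] = true
    end
    return true -- process resp.result
end
```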
## License
Distributed under the BSD 3-Clause License. See [`LICENSE`](./LICENSE) for more information.
[^1]: Go Sally

22
com/Access/_common.lua Normal file
View File

@@ -0,0 +1,22 @@
-- File com/Access/_common.lua
--
-- Created at 2025-21-10
--
-- Description:
--- Common functions for Access module
local common = {}
function common.CheckMissingElement(arr, cmp)
local is_missing = {}
local ok = true
for _, key in ipairs(arr) do
if cmp[key] == nil then
table.insert(is_missing, key)
ok = false
end
end
return ok, is_missing
end
return common

30
com/Access/_errors.lua Normal file
View File

@@ -0,0 +1,30 @@
-- File com/Access/_errors.lua
--
-- Created at 2025-21-10
-- Description:
--- Centralized error definitions for Access operations
--- to keep API responses consistent and clean.
local errors = {
-- Common validation
MISSING_PARAMS = { code = -32602, message = "Missing params" },
INVALID_FIELD_TYPE = { code = -32602, message = "'fields' must be a non-empty table" },
INVALID_BY_PARAM = { code = -32602, message = "Invalid 'by' param" },
NO_VALID_FIELDS = { code = -32604, message = "No valid fields to update" },
-- Existence / duplication
UNIT_NOT_FOUND = { code = -32102, message = "Unit does not exist" },
UNIT_EXISTS = { code = -32101, message = "Unit already exists" },
-- Database & constraint
UNIQUE_CONSTRAINT = { code = -32602, message = "Unique constraint failed" },
DB_QUERY_FAILED = { code = -32001, message = "Database query failed" },
DB_EXEC_FAILED = { code = -32002, message = "Database execution failed" },
DB_INSERT_FAILED = { code = -32003, message = "Failed to create unit" },
DB_DELETE_FAILED = { code = -32004, message = "Failed to delete unit" },
-- Generic fallback
UNKNOWN = { code = -32099, message = "Unexpected internal error" },
}
return errors
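The Access scripts themselves are not included in this compare; they are expected to consume this table together with `com/Access/_common.lua` in the same way the Unit scripts further down do. A hypothetical sketch, shown only for illustration:
```lua
-- Hypothetical com/Access/Check.lua, illustrating how _common.lua and
-- _errors.lua fit together; it is not part of this changeset.
local session = require("internal.session")
local common = require("com/Access/_common")
local errors = require("com/Access/_errors")

local params = session.request.params.get()
local ok, missing = common.CheckMissingElement({"token"}, params)
if not ok then
    -- consistent JSON-RPC error shape across Access methods
    session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, missing)
    return
end
session.response.send({ status = "ok" })
```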

11
com/Echo.lua Normal file
View File

@@ -0,0 +1,11 @@
local s = require("internal.session")
if not s.request.params.__fetched.data then
s.response.error = {
code = 123,
message = "params.data is missing"
}
return
end
s.response.send(s.request.params.__fetched)

com/List.lua
View File

@@ -1,6 +1,20 @@
-- com/List.lua
local function isValidCommand(name)
local session = require("internal.session")
local params = session.request.params.get()
if params.about then
session.response.result = {
description = "Returns a list of available methods",
params = {
layer = "select which layer list to display"
}
}
return
end
local function isValidName(name)
return name:match("^[%w]+$") ~= nil
end
@@ -11,8 +25,20 @@ local function scanDirectory(basePath, targetPath)
if handle then
for filePath in handle:lines() do
local fileName = filePath:match("([^/]+)%.lua$")
if fileName and isValidCommand(fileName) then
local parts = {}
for part in filePath:gsub(".lua$", ""):gmatch("[^/]+") do
table.insert(parts, part)
end
local allValid = true
for _, part in ipairs(parts) do
if not isValidName(part) then
allValid = false
break
end
end
if allValid then
local relPath = filePath:gsub("^"..basePath.."/", ""):gsub(".lua$", ""):gsub("/", ">")
table.insert(res, relPath)
end
@@ -24,8 +50,8 @@ local function scanDirectory(basePath, targetPath)
end
local basePath = "com"
local layer = In.Params and In.Params.layer and In.Params.layer:gsub(">", "/") or nil
local layer = params.layer and params.layer:gsub(">", "/") or nil
Out.Result = {
session.response.send({
answer = layer and scanDirectory(basePath, layer) or scanDirectory(basePath, "")
}
})

68
com/Unit/Create.lua Normal file
View File

@@ -0,0 +1,68 @@
-- File com/Unit/Create.lua
--
-- Created at 2025-05-10 18:23
--
-- Updated at -
-- Description:
--- Creates a record in the unit.db database without
--- requiring additional permissions. Requires username,
--- password (hashing occurs at the server level), and email fields.
local log = require("internal.log")
local db = require("internal.database.sqlite").connect("db/unit.db", {log = true})
local session = require("internal.session")
local crypt = require("internal.crypt.bcrypt")
local sha256 = require("internal.crypt.sha256")
local common = require("com/Unit/_common")
local errors = require("com/Unit/_errors")
-- Preparing for first db query
local function close_db()
if db then
log.debug("Closing DB connection")
db:close()
db = nil
end
end
local params = session.request.params.get()
local ok, mp = common.CheckMissingElement({"username", "password", "email"}, params)
if not ok then
close_db()
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
end
local hashPass = crypt.generate(params.password, crypt.DefaultCost)
local unitID = string.sub(sha256.hash(session.__seed), 1, 16)
local ctx, err = db:exec(
"INSERT INTO units (user_id, username, email, password) VALUES (?, ?, ?, ?)",
{
unitID,
params.username,
params.email,
hashPass,
}
)
if err ~= nil then
log.error("Insert failed: "..tostring(err))
close_db()
session.response.send_error(errors.DB_INSERT_FAILED.code, errors.DB_INSERT_FAILED.message)
end
local _, err = ctx:wait()
if err ~= nil then
close_db()
if tostring(err):match("UNIQUE constraint failed") then
session.response.send_error(errors.UNIT_EXISTS.code, errors.UNIT_EXISTS.message)
else
log.error("Insert confirmation failed: "..tostring(err))
session.response.send_error()
end
end
close_db()
session.response.send({unit_id = unitID})

77
com/Unit/Delete.lua Normal file
View File

@@ -0,0 +1,77 @@
-- File com/Unit/Delete.lua
--
-- Created at 2025-05-10 19:18
--
-- Updated at -
local log = require("internal.log")
local db = require("internal.database.sqlite").connect("db/unit.db", {log = true})
local session = require("internal.session")
local common = require("com/Unit/_common")
local errors = require("com/Unit/_errors")
-- Preparing for first db query
local function close_db()
if db then
log.debug("Closing DB connection")
db:close()
db = nil
end
end
local params = session.request.params.get()
local ok, mp = common.CheckMissingElement({"user_id"}, params)
if not ok then
close_db()
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
end
local existing, err = db:query([[
SELECT 1
FROM units
WHERE user_id = ?
AND entry_status != 'deleted'
AND deleted_at IS NULL
LIMIT 1
]], {
params.user_id
})
if err ~= nil then
log.error("Email check failed: "..tostring(err))
close_db()
session.response.send_error()
end
if existing and #existing == 0 then
close_db()
session.response.send_error(errors.UNIT_NOT_FOUND.code, errors.UNIT_NOT_FOUND.message)
end
local ctx, err = db:exec(
[[
UPDATE units
SET entry_status = 'deleted',
deleted_at = CURRENT_TIMESTAMP
WHERE user_id = ? AND deleted_at is NULL
]],
{ params.user_id }
)
if err ~= nil then
log.error("Soft delete failed: " .. tostring(err))
close_db()
session.response.send_error(errors.DB_DELETE_FAILED.code, errors.DB_DELETE_FAILED.message)
end
local res, err = ctx:wait()
if err ~= nil then
log.error("Soft delete confirmation failed: " .. tostring(err))
close_db()
session.response.send_error(errors.DB_DELETE_FAILED.code, errors.DB_DELETE_FAILED.message)
end
close_db()
session.response.send()

55
com/Unit/Get.lua Normal file
View File

@@ -0,0 +1,55 @@
-- File com/Unit/Get.lua
--
-- Created at 2025-09-25 20:04
--
-- Updated at -
local log = require("internal.log")
local db = require("internal.database.sqlite").connect("db/unit.db", {log = true})
local session = require("internal.session")
local common = require("com/Unit/_common")
local errors = require("com/Unit/_errors")
-- Preparing for first db query
local function close_db()
if db then
log.debug("Closing DB connection")
db:close()
db = nil
end
end
local params = session.request.params.get()
local ok, mp = common.CheckMissingElement({"by", "value"}, params)
if not ok then
close_db()
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
end
if not (params.by == "email" or params.by == "username" or params.by == "user_id") then
close_db()
session.response.send_error(errors.INVALID_BY_PARAM.code, errors.INVALID_BY_PARAM.message)
end
local unit, err = db:query_row(
"SELECT user_id, username, email, created_at, updated_at, deleted_at, entry_status FROM units WHERE "..params.by.." = ? AND deleted_at IS NULL LIMIT 1",
{
params.value
}
)
if err then
close_db()
log.error("DB query error: " .. tostring(err))
session.response.send_error()
end
if not unit then
close_db()
session.response.send_error(errors.UNIT_NOT_FOUND.code, errors.UNIT_NOT_FOUND.message)
end
close_db()
session.response.send(unit)

102
com/Unit/Update.lua Normal file
View File

@@ -0,0 +1,102 @@
-- File com/Unit/Update.lua
--
-- Created at 2025-10-10
--
local log = require("internal.log")
local db = require("internal.database.sqlite").connect("db/unit.db", { log = true })
local session = require("internal.session")
local common = require("com/Unit/_common")
local errors = require("com/Unit/_errors")
local function close_db()
if db then
log.debug("Closing DB connection")
db:close()
db = nil
end
end
local params = session.request.params.get()
local ok, mp = common.CheckMissingElement({"user_id", "fields"}, params)
if not ok then
close_db()
session.response.send_error(errors.MISSING_PARAMS.code, errors.MISSING_PARAMS.message, mp)
end
if type(params.fields) ~= "table" or next(params.fields) == nil then
close_db()
session.response.send_error(errors.INVALID_FIELD_TYPE.code, errors.INVALID_FIELD_TYPE.message)
end
local allowed = {
username = true,
email = true,
password = true,
entry_status = true
}
local exists = db:query_row(
"SELECT 1 FROM units WHERE user_id = ? AND deleted_at IS NULL LIMIT 1",
{ params.user_id }
)
if not exists then
close_db()
session.response.send_error(errors.UNIT_NOT_FOUND.code, errors.UNIT_NOT_FOUND.message)
end
local set_clauses = {}
local values = {}
for k, v in pairs(params.fields) do
if allowed[k] then
if k == "password" then
local crypt = require("internal.crypt.bcrypt")
v = crypt.generate(v, crypt.DefaultCost)
end
table.insert(set_clauses, k .. " = ?")
table.insert(values, v)
else
log.warn("Ignoring unsupported field: " .. k)
end
end
if #set_clauses == 0 then
close_db()
session.response.send_error(errors.NO_VALID_FIELDS.code, errors.NO_VALID_FIELDS.message)
end
table.insert(set_clauses, "updated_at = CURRENT_TIMESTAMP")
local query = "UPDATE units SET " .. table.concat(set_clauses, ", ")
.. " WHERE user_id = ? AND deleted_at IS NULL"
table.insert(values, params.user_id)
local ctx, err = db:exec(query, values)
if not ctx then
close_db()
if tostring(err):match("UNIQUE constraint failed") then
session.response.send_error(errors.UNIQUE_CONSTRAINT.code, errors.UNIQUE_CONSTRAINT.message)
else
session.response.send_error()
end
end
local _, err = ctx:wait()
if err ~= nil then
close_db()
if tostring(err):match("UNIQUE constraint failed") then
session.response.send_error(errors.UNIQUE_CONSTRAINT.code, errors.UNIQUE_CONSTRAINT.message)
else
log.error("Insert confirmation failed: "..tostring(err))
session.response.send_error()
end
end
close_db()
session.response.send()

23
com/Unit/_common.lua Normal file
View File

@@ -0,0 +1,23 @@
-- File com/Unit/_common.lua
--
-- Created at 2025-05-10 18:23
--
-- Updated at -
-- Description:
--- Common functions for Unit module
local common = {}
function common.CheckMissingElement(arr, cmp)
local is_missing = {}
local ok = true
for _, key in ipairs(arr) do
if cmp[key] == nil then
table.insert(is_missing, key)
ok = false
end
end
return ok, is_missing
end
return common

30
com/Unit/_errors.lua Normal file
View File

@@ -0,0 +1,30 @@
-- File com/Unit/_errors.lua
--
-- Created at 2025-10-10
-- Description:
--- Centralized error definitions for Unit operations
--- to keep API responses consistent and clean.
local errors = {
-- Common validation
MISSING_PARAMS = { code = -32602, message = "Missing params" },
INVALID_FIELD_TYPE = { code = -32602, message = "'fields' must be a non-empty table" },
INVALID_BY_PARAM = { code = -32602, message = "Invalid 'by' param" },
NO_VALID_FIELDS = { code = -32604, message = "No valid fields to update" },
-- Existence / duplication
UNIT_NOT_FOUND = { code = -32102, message = "Unit does not exist" },
UNIT_EXISTS = { code = -32101, message = "Unit already exists" },
-- Database & constraint
UNIQUE_CONSTRAINT = { code = -32602, message = "Unique constraint failed" },
DB_QUERY_FAILED = { code = -32001, message = "Database query failed" },
DB_EXEC_FAILED = { code = -32002, message = "Database execution failed" },
DB_INSERT_FAILED = { code = -32003, message = "Failed to create unit" },
DB_DELETE_FAILED = { code = -32004, message = "Failed to delete unit" },
-- Generic fallback
UNKNOWN = { code = -32099, message = "Unexpected internal error" },
}
return errors

View File

@@ -1,12 +0,0 @@
local function validate()
if not In.Params.msg or In.Params.msg == "" then
Out.Error = {
message = "there must be a msg parameter"
}
return
end
end
validate()
Out.Result.answer = In.Params.msg

View File

@@ -1,7 +0,0 @@
Log.Event("got ping")
Log.EventWarn("got ping")
Log.EventError("got ping")
Log.Error("mm")
Out.Result.answer = "pong"

View File

@@ -1,24 +1,54 @@
---@diagnostic disable: missing-fields, missing-return
---@alias AnyTable table<string, any>
--@diagnostic disable: missing-fields, missing-return
---@type AnyTable
In = {
Params = {},
}
---@alias Any any
---@alias AnyTable table<string, Any>
---@type AnyTable
Out = {
Result = {},
}
--- Global session module interface
---@class SessionIn
---@field params AnyTable Request parameters
---@class Log
---@field Info fun(msg: string)
---@field Debug fun(msg: string)
---@field Error fun(msg: string)
---@field Warn fun(msg: string)
---@field Event fun(msg: string)
---@field EventError fun(msg: string)
---@field EventWarn fun(msg: string)
---@class SessionOut
---@field result Any|string? Result payload (table or primitive)
---@field error { code: integer, message: string, data: Any }? Optional error info
---@type Log
Log = {}
---@class SessionModule
---@field request SessionIn Input context (read-only)
---@field response SessionOut Output context (write results/errors)
--- Global log module interface
---@class LogModule
---@field info fun(msg: string) Log informational message
---@field debug fun(msg: string) Log debug message
---@field error fun(msg: string) Log error message
---@field warn fun(msg: string) Log warning message
---@field event fun(msg: string) Log event (generic)
---@field event_error fun(msg: string) Log event error
---@field event_warn fun(msg: string) Log event warning
--- Global net module interface
---@class HttpResponse
---@field status integer HTTP status code
---@field status_text string HTTP status text
---@field body string Response body
---@field content_length integer Content length
---@field headers AnyTable Map of headers
---@class HttpModule
---@field get fun(log: boolean, url: string): HttpResponse, string? Perform GET
---@field post fun(log: boolean, url: string, content_type: string, payload: string): HttpResponse, string? Perform POST
---@class NetModule
---@field http HttpModule HTTP client functions
--- Global variables declaration
---@global
---@type SessionModule
_G.session = session or {}
---@global
---@type LogModule
_G.log = log or {}
---@global
---@type NetModule
_G.net = net or {}

View File

@@ -2,15 +2,3 @@
package.path = package.path .. ";/usr/lib64/lua/5.1/?.lua;/usr/local/share/lua/5.1/?.lua" .. ";./com/?.lua;"
package.cpath = package.cpath .. ";/usr/lib64/lua/5.1/?.so;/usr/local/lib/lua/5.1/?.so"
print = function() end
io.write = function(...) end
io.stdout = function() return nil end
io.stderr = function() return nil end
io.read = function(...) return nil end
---@type table<string, any>
Status = {
ok = "ok",
error = "error",
invalid = "invalid",
}

View File

@@ -1,21 +0,0 @@
mode: "prod"
http_server:
address: "0.0.0.0:8080"
api:
latest-version: v1
layers:
- b1
- s2
tls:
enabled: false
cert_file: "./cert/fullchain.pem"
key_file: "./cert/privkey.pem"
com_dir: "com/"
updates:
enabled: true
check-interval: 1h
repository_url: "https://repo.serve.lv/raw/go-sally"

View File

@@ -1,187 +0,0 @@
package hooks
import (
"errors"
"fmt"
"io"
"io/fs"
"log"
"log/slog"
"os"
"path/filepath"
"slices"
"syscall"
"time"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/internal/engine/logs"
"gopkg.in/ini.v1"
)
var Compositor *config.Compositor = config.NewCompositor()
func Init0Hook(cs *corestate.CoreState, x *app.AppX) {
x.Config = Compositor
x.Log.SetOutput(os.Stdout)
x.Log.SetPrefix(logs.SetBrightBlack(fmt.Sprintf("(%s) ", cs.Stage)))
x.Log.SetFlags(log.Ldate | log.Ltime)
}
// First stage: pre-init
func Init1Hook(cs *corestate.CoreState, x *app.AppX) {
*cs = *corestate.NewCorestate(&corestate.CoreState{
UUID32DirName: "uuid",
NodeBinName: filepath.Base(os.Args[0]),
NodeVersion: config.NodeVersion,
MetaDir: "./.meta",
Stage: corestate.StagePreInit,
StartTimestampUnix: time.Now().Unix(),
})
}
func Init2Hook(cs *corestate.CoreState, x *app.AppX) {
x.Log.SetPrefix(logs.SetBlue(fmt.Sprintf("(%s) ", cs.Stage)))
if err := x.Config.LoadEnv(); err != nil {
x.Log.Fatalf("env load error: %s", err)
}
cs.NodePath = x.Config.Env.NodePath
if cfgPath := x.Config.CMDLine.Run.ConfigPath; cfgPath != "" {
x.Config.Env.ConfigPath = cfgPath
}
if err := x.Config.LoadConf(x.Config.Env.ConfigPath); err != nil {
x.Log.Fatalf("conf load error: %s", err)
}
}
func Init3Hook(cs *corestate.CoreState, x *app.AppX) {
uuid32, err := corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
if errors.Is(err, fs.ErrNotExist) {
if err := corestate.SetNodeUUID(filepath.Join(cs.NodePath, cs.MetaDir, cs.UUID32DirName)); err != nil {
x.Log.Fatalf("Cannod generate node uuid: %s", err.Error())
}
uuid32, err = corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
if err != nil {
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
}
if err != nil {
x.Log.Fatalf("uuid load error: %s", err)
}
cs.UUID32 = uuid32
}
func Init4Hook(cs *corestate.CoreState, x *app.AppX) {
if x.Config.Env.ParentStagePID != os.Getpid() {
if !slices.Contains(x.Config.Conf.DisableWarnings, "--WNonStdTmpDir") && os.TempDir() != "/tmp" {
x.Log.Printf("%s: %s", logs.PrintWarn(), "Non-standard value specified for temporary directory")
}
// still pre-init stage
runDir, err := run_manager.Create(cs.UUID32)
if err != nil {
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
cs.RunDir = runDir
input, err := os.Open(os.Args[0])
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if err := run_manager.Set(cs.NodeBinName); err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
fmgr := run_manager.File(cs.NodeBinName)
output, err := fmgr.Open()
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if _, err := io.Copy(output, input); err != nil {
fmgr.Close()
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if err := os.Chmod(filepath.Join(cs.RunDir, cs.NodeBinName), 0755); err != nil {
fmgr.Close()
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
input.Close()
fmgr.Close()
runArgs := os.Args
runArgs[0] = filepath.Join(cs.RunDir, cs.NodeBinName)
// prepare environ
env := utils.SetEviron(os.Environ(), fmt.Sprintf("GS_PARENT_PID=%d", os.Getpid()))
if err := syscall.Exec(runArgs[0], runArgs, env); err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
}
x.Log.Printf("Node uuid is %s", cs.UUID32)
}
// post-init stage
func Init5Hook(cs *corestate.CoreState, x *app.AppX) {
cs.Stage = corestate.StagePostInit
x.Log.SetPrefix(logs.SetYellow(fmt.Sprintf("(%s) ", cs.Stage)))
cs.RunDir = run_manager.Toggle()
exist, err := utils.ExistsMatchingDirs(filepath.Join(os.TempDir(), fmt.Sprintf("/*-%s-%s", cs.UUID32, "gosally-runtime")), cs.RunDir)
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if exist {
_ = run_manager.Clean()
x.Log.Fatalf("Unable to continue node operation: A node with the same identifier was found in the runtime environment")
}
if err := run_manager.Set("run.lock"); err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
lockPath, err := run_manager.Get("run.lock")
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
lockFile := ini.Empty()
secRun, err := lockFile.NewSection("runtime")
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
secRun.Key("pid").SetValue(fmt.Sprintf("%d/%d", os.Getpid(), x.Config.Env.ParentStagePID))
secRun.Key("version").SetValue(cs.NodeVersion)
secRun.Key("uuid").SetValue(cs.UUID32)
secRun.Key("timestamp").SetValue(time.Unix(cs.StartTimestampUnix, 0).Format("2006-01-02/15:04:05 MST"))
secRun.Key("timestamp-unix").SetValue(fmt.Sprintf("%d", cs.StartTimestampUnix))
err = lockFile.SaveTo(lockPath)
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
}
func Init6Hook(cs *corestate.CoreState, x *app.AppX) {
cs.Stage = corestate.StageReady
x.Log.SetPrefix(logs.SetGreen(fmt.Sprintf("(%s) ", cs.Stage)))
x.SLog = new(slog.Logger)
newSlog, err := logs.SetupLogger(x.Config.Conf.Log)
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
*x.SLog = *newSlog
}

View File

@@ -1,168 +0,0 @@
package hooks
import (
"context"
"errors"
"fmt"
"log"
"net"
"net/http"
"regexp"
"time"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/internal/core/update"
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/internal/engine/logs"
"github.com/akyaiy/GoSally-mvp/internal/server/gateway"
"github.com/akyaiy/GoSally-mvp/internal/server/sv1"
"github.com/go-chi/chi/v5"
"github.com/go-chi/cors"
"github.com/spf13/cobra"
"golang.org/x/net/netutil"
)
var nodeApp = app.New()
func Run(cmd *cobra.Command, args []string) {
nodeApp.InitialHooks(
Init0Hook, Init1Hook, Init2Hook,
Init3Hook, Init4Hook, Init5Hook,
Init6Hook,
)
nodeApp.Run(RunHook)
}
func RunHook(ctx context.Context, cs *corestate.CoreState, x *app.AppX) error {
ctxMain, cancelMain := context.WithCancel(ctx)
runLockFile := run_manager.File("run.lock")
_, err := runLockFile.Open()
if err != nil {
x.Log.Fatalf("cannot open run.lock: %s", err)
}
_, err = runLockFile.Watch(ctxMain, func() {
x.Log.Printf("run.lock was touched")
_ = run_manager.Clean()
cancelMain()
})
if err != nil {
x.Log.Printf("watch error: %s", err)
}
serverv1 := sv1.InitV1Server(&sv1.HandlerV1InitStruct{
X: x,
CS: cs,
AllowedCmd: regexp.MustCompile(`^[a-zA-Z0-9]+(>[a-zA-Z0-9]+)*$`),
Ver: "v1",
})
s := gateway.InitGateway(&gateway.GatewayServerInit{
CS: cs,
X: x,
}, serverv1)
r := chi.NewRouter()
r.Use(cors.Handler(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"},
AllowCredentials: true,
MaxAge: 300,
}))
r.HandleFunc(config.ComDirRoute, s.Handle)
r.Route("/favicon.ico", func(r chi.Router) {
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
})
})
srv := &http.Server{
Addr: x.Config.Conf.HTTPServer.Address,
Handler: r,
ErrorLog: log.New(&logs.SlogWriter{
Logger: x.SLog,
Level: logs.GlobalLevel,
}, "", 0),
}
nodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
if err := srv.Shutdown(ctxMain); err != nil {
x.Log.Printf("%s: Failed to stop the server gracefully: %s", logs.PrintError(), err.Error())
} else {
x.Log.Printf("Server stopped gracefully")
}
x.Log.Println("Cleaning up...")
if err := run_manager.Clean(); err != nil {
x.Log.Printf("%s: Cleanup error: %s", logs.PrintError(), err.Error())
}
x.Log.Println("bye!")
})
go func() {
defer utils.CatchPanicWithCancel(cancelMain)
if x.Config.Conf.TLS.TlsEnabled {
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port))
if err != nil {
x.Log.Printf("%s: Failed to start TLS listener: %s", logs.PrintError(), err.Error())
cancelMain()
return
}
x.Log.Printf("Serving on %s port %s with TLS... (https://%s%s)", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
limitedListener := netutil.LimitListener(listener, 100)
if err := srv.ServeTLS(limitedListener, x.Config.Conf.TLS.CertFile, x.Config.Conf.TLS.KeyFile); err != nil && !errors.Is(err, http.ErrServerClosed) {
x.Log.Printf("%s: Failed to start HTTPS server: %s", logs.PrintError(), err.Error())
cancelMain()
}
} else {
x.Log.Printf("Serving on %s port %s... (http://%s%s)", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", x.Config.Conf.HTTPServer.Address, x.Config.Conf.HTTPServer.Port))
if err != nil {
x.Log.Printf("%s: Failed to start listener: %s", logs.PrintError(), err.Error())
cancelMain()
return
}
limitedListener := netutil.LimitListener(listener, 100)
if err := srv.Serve(limitedListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
x.Log.Printf("%s: Failed to start HTTP server: %s", logs.PrintError(), err.Error())
cancelMain()
}
}
}()
if x.Config.Conf.Updates.UpdatesEnabled {
go func() {
defer utils.CatchPanicWithCancel(cancelMain)
updated := update.NewUpdater(&update.UpdaterInit{
X: x,
Ctx: ctxMain,
Cancel: cancelMain,
})
updated.Shutdownfunc(cancelMain)
for {
isNewUpdate, err := updated.CkeckUpdates()
if err != nil {
x.Log.Printf("Failed to check for updates: %s", err.Error())
}
if isNewUpdate {
if err := updated.Update(); err != nil {
x.Log.Printf("Failed to update: %s", err.Error())
} else {
x.Log.Printf("Update completed successfully")
}
}
time.Sleep(x.Config.Conf.Updates.CheckInterval)
}
}()
}
<-ctxMain.Done()
nodeApp.CallFallback(ctx)
return nil
}

View File

@@ -1,80 +0,0 @@
// Package config provides configuration management for the application.
// config is built on top of the third-party module cleanenv
package config
import (
"time"
)
type CompositorContract interface {
LoadEnv() error
LoadConf(path string) error
}
type Compositor struct {
CMDLine *CMDLine
Conf *Conf
Env *Env
}
type Conf struct {
Mode string `mapstructure:"mode"`
ComDir string `mapstructure:"com_dir"`
HTTPServer HTTPServer `mapstructure:"http_server"`
TLS TLS `mapstructure:"tls"`
Updates Updates `mapstructure:"updates"`
Log Log `mapstructure:"log"`
DisableWarnings []string `mapstructure:"disable_warnings"`
}
type HTTPServer struct {
Address string `mapstructure:"address"`
Port string `mapstructure:"port"`
Timeout time.Duration `mapstructure:"timeout"`
IdleTimeout time.Duration `mapstructure:"idle_timeout"`
HTTPServer_Api HTTPServer_Api `mapstructure:"api"`
}
type HTTPServer_Api struct {
LatestVer string `mapstructure:"latest-version"`
Layers []string `mapstructure:"layers"`
}
type TLS struct {
TlsEnabled bool `mapstructure:"enabled"`
CertFile string `mapstructure:"cert_file"`
KeyFile string `mapstructure:"key_file"`
}
type Updates struct {
UpdatesEnabled bool `mapstructure:"enabled"`
CheckInterval time.Duration `mapstructure:"check_interval"`
RepositoryURL string `mapstructure:"repository_url"`
WantedVersion string `mapstructure:"wanted_version"`
}
type Log struct {
Level string `mapstructure:"level"`
OutPath string `mapstructure:"out_path"`
}
// ConfigEnv structure for environment variables
type Env struct {
ConfigPath string `mapstructure:"config_path"`
NodePath string `mapstructure:"node_path"`
ParentStagePID int `mapstructure:"parent_pid"`
}
type CMDLine struct {
Run Run
Node Root
}
type Root struct {
Debug bool `persistent:"true" full:"debug" short:"d" def:"false" desc:"Set debug mode"`
}
type Run struct {
ConfigPath string `persistent:"true" full:"config" short:"c" def:"./config.yaml" desc:"Path to configuration file"`
Test []int `persistent:"true" full:"test" short:"t" def:"" desc:"js test"`
}

View File

@@ -1,22 +0,0 @@
package rpc
import "encoding/json"
func NewError(code int, message string, id *json.RawMessage) *RPCResponse {
return &RPCResponse{
JSONRPC: JSONRPCVersion,
ID: id,
Error: map[string]any{
"code": code,
"message": message,
},
}
}
func NewResponse(result any, id *json.RawMessage) *RPCResponse {
return &RPCResponse{
JSONRPC: JSONRPCVersion,
ID: id,
Result: result,
}
}

View File

@@ -1,120 +0,0 @@
package sv1
import (
"fmt"
"log/slog"
"os"
"path/filepath"
"strconv"
"github.com/akyaiy/GoSally-mvp/internal/engine/logs"
"github.com/akyaiy/GoSally-mvp/internal/server/rpc"
lua "github.com/yuin/gopher-lua"
)
func (h *HandlerV1) handleLUA(path string, req *rpc.RPCRequest) *rpc.RPCResponse {
L := lua.NewState()
defer L.Close()
inTable := L.NewTable()
paramsTable := L.NewTable()
if fetchedParams, ok := req.Params.(map[string]any); ok {
for k, v := range fetchedParams {
L.SetField(paramsTable, k, ConvertGolangTypesToLua(L, v))
}
}
L.SetField(inTable, "Params", paramsTable)
L.SetGlobal("In", inTable)
outTable := L.NewTable()
resultTable := L.NewTable()
L.SetField(outTable, "Result", resultTable)
L.SetGlobal("Out", outTable)
logTable := L.NewTable()
logFuncs := map[string]func(string, ...any){
"Info": h.x.SLog.Info,
"Debug": h.x.SLog.Debug,
"Error": h.x.SLog.Error,
"Warn": h.x.SLog.Warn,
}
for name, logFunc := range logFuncs {
L.SetField(logTable, name, L.NewFunction(func(L *lua.LState) int {
msg := L.ToString(1)
logFunc(fmt.Sprintf("the script says: %s", msg), slog.String("script", path))
return 0
}))
}
L.SetField(logTable, "Event", L.NewFunction(func(L *lua.LState) int {
msg := L.ToString(1)
h.x.Log.Printf("%s: %s", path, msg)
return 0
}))
L.SetField(logTable, "EventError", L.NewFunction(func(L *lua.LState) int {
msg := L.ToString(1)
h.x.Log.Printf("%s: %s: %s", logs.PrintError(), path, msg)
return 0
}))
L.SetField(logTable, "EventWarn", L.NewFunction(func(L *lua.LState) int {
msg := L.ToString(1)
h.x.Log.Printf("%s: %s: %s", logs.PrintWarn(), path, msg)
return 0
}))
L.SetGlobal("Log", logTable)
prep := filepath.Join(h.x.Config.Conf.ComDir, "_prepare.lua")
if _, err := os.Stat(prep); err == nil {
if err := L.DoFile(prep); err != nil {
return rpc.NewError(rpc.ErrInternalError, err.Error(), req.ID)
}
}
if err := L.DoFile(path); err != nil {
return rpc.NewError(rpc.ErrInternalError, err.Error(), req.ID)
}
lv := L.GetGlobal("Out")
outTbl, ok := lv.(*lua.LTable)
if !ok {
return rpc.NewError(rpc.ErrInternalError, "Out is not a table", req.ID)
}
// Check if Out.Error exists
if errVal := outTbl.RawGetString("Error"); errVal != lua.LNil {
if errTbl, ok := errVal.(*lua.LTable); ok {
code := rpc.ErrInternalError
message := "Internal script error"
if c := errTbl.RawGetString("code"); c.Type() == lua.LTNumber {
code = int(c.(lua.LNumber))
}
if msg := errTbl.RawGetString("message"); msg.Type() == lua.LTString {
message = msg.String()
}
h.x.SLog.Error("the script terminated with an error", slog.String("code", strconv.Itoa(code)), slog.String("message", message))
return rpc.NewError(code, message, req.ID)
}
return rpc.NewError(rpc.ErrInternalError, "Out.Error is not a table", req.ID)
}
// Otherwise, parse Out.Result
resultVal := outTbl.RawGetString("Result")
resultTbl, ok := resultVal.(*lua.LTable)
if !ok {
return rpc.NewError(rpc.ErrInternalError, "Out.Result is not a table", req.ID)
}
out := make(map[string]any)
resultTbl.ForEach(func(key lua.LValue, value lua.LValue) {
out[key.String()] = ConvertLuaTypesToGolang(value)
})
out["responsible-node"] = h.cs.UUID32
return rpc.NewResponse(out, req.ID)
}

View File

@@ -1,77 +0,0 @@
package sv1
import (
"fmt"
lua "github.com/yuin/gopher-lua"
)
func ConvertLuaTypesToGolang(value lua.LValue) any {
switch value.Type() {
case lua.LTString:
return value.String()
case lua.LTNumber:
return float64(value.(lua.LNumber))
case lua.LTBool:
return bool(value.(lua.LBool))
case lua.LTTable:
tbl := value.(*lua.LTable)
var arr []any
isArray := true
tbl.ForEach(func(key, val lua.LValue) {
if key.Type() != lua.LTNumber {
isArray = false
}
arr = append(arr, ConvertLuaTypesToGolang(val))
})
if isArray {
return arr
}
result := make(map[string]any)
tbl.ForEach(func(key, val lua.LValue) {
result[key.String()] = ConvertLuaTypesToGolang(val)
})
return result
case lua.LTNil:
return nil
default:
return value.String()
}
}
func ConvertGolangTypesToLua(L *lua.LState, val any) lua.LValue {
switch v := val.(type) {
case string:
return lua.LString(v)
case bool:
return lua.LBool(v)
case int:
return lua.LNumber(float64(v))
case int64:
return lua.LNumber(float64(v))
case float32:
return lua.LNumber(float64(v))
case float64:
return lua.LNumber(v)
case []any:
tbl := L.NewTable()
for i, item := range v {
tbl.RawSetInt(i+1, ConvertGolangTypesToLua(L, item))
}
return tbl
case map[string]any:
tbl := L.NewTable()
for key, value := range v {
tbl.RawSetString(key, ConvertGolangTypesToLua(L, value))
}
return tbl
case nil:
return lua.LNil
default:
return lua.LString(fmt.Sprintf("%v", v))
}
}

10
main.go
View File

@@ -1,10 +0,0 @@
package main
import (
"github.com/akyaiy/GoSally-mvp/cmd"
_ "modernc.org/sqlite"
)
func main() {
cmd.Execute()
}

View File

@@ -1,3 +1,5 @@
// The cmd package is the main package where all the main hooks and methods are called.
// GoSally uses spf13/cobra to organize all the calls.
package cmd
import (
@@ -5,9 +7,9 @@ import (
"log"
"os"
"github.com/akyaiy/GoSally-mvp/hooks"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/engine/logs"
"github.com/akyaiy/GoSally-mvp/src/hooks"
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/spf13/cobra"
)
@@ -22,9 +24,11 @@ scripts in a given directory. For more information, visit: https://gosally.oblat
},
}
// Execute prepares global log, loads cmdline args
// and executes rootCmd.Execute()
func Execute() {
log.SetOutput(os.Stdout)
log.SetPrefix(logs.SetBrightBlack(fmt.Sprintf("(%s) ", corestate.StageNotReady)))
log.SetPrefix(colors.SetBrightBlack(fmt.Sprintf("(%s) ", corestate.StageNotReady)))
log.SetFlags(log.Ldate | log.Ltime)
hooks.Compositor.LoadCMDLine(rootCmd)
_ = rootCmd.Execute()

View File

@@ -1,7 +1,7 @@
package cmd
import (
"github.com/akyaiy/GoSally-mvp/hooks"
"github.com/akyaiy/GoSally-mvp/src/hooks"
"github.com/spf13/cobra"
)
@@ -11,6 +11,7 @@ var runCmd = &cobra.Command{
Short: "Run node normally",
Long: `
"run" starts the node with settings depending on the configuration file`,
// hooks.Run is essentially the heart of the program
Run: hooks.Run,
}

View File

@@ -4,7 +4,8 @@ import (
"fmt"
"runtime"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/server/sv1"
"github.com/spf13/cobra"
)
@@ -13,7 +14,8 @@ var verCmd = &cobra.Command{
Aliases: []string{"ver", "v"},
Short: "Return node version",
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("GoSally node: %s\n", config.NodeVersion)
fmt.Printf("Go Sally version: %s\n", config.NodeVersion)
fmt.Printf("sv1 version: %s\n", sv1.SV1Version)
fmt.Printf("Go version: %s\n", runtime.Version())
fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
},

View File

@@ -1,12 +1,14 @@
module github.com/akyaiy/GoSally-mvp
module github.com/akyaiy/GoSally-mvp/src
go 1.24.4
require (
github.com/go-chi/chi/v5 v5.2.2
github.com/google/uuid v1.6.0
github.com/spf13/cobra v1.9.1
github.com/spf13/viper v1.20.1
github.com/yuin/gopher-lua v1.1.1
golang.org/x/crypto v0.40.0
golang.org/x/net v0.42.0
gopkg.in/ini.v1 v1.67.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
@@ -17,7 +19,7 @@ require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
@@ -29,7 +31,6 @@ require (
github.com/spf13/cast v1.9.2 // indirect
github.com/spf13/pflag v1.0.7 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/text v0.27.0 // indirect

View File

@@ -13,8 +13,12 @@ github.com/go-chi/cors v1.2.2 h1:Jmey33TE+b+rB7fT8MUy1u0I4L+NARQlK6LhzKPSyQE=
github.com/go-chi/cors v1.2.2/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -36,12 +40,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/sagikazarmark/locafero v0.10.0 h1:FM8Cv6j2KqIhM2ZK7HZjm4mpj9NBktLgowT1aN9q5Cc=
github.com/sagikazarmark/locafero v0.10.0/go.mod h1:Ieo3EUsjifvQu4NZwV5sPd4dwvu0OCgEQV7vjc9yDjw=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
@@ -61,17 +61,23 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -81,11 +87,29 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.26.3 h1:yEN8dzrkRFnn4PUUKXLYIqVf2PJYAEjMTFjO3BDGc3I=
modernc.org/cc/v4 v4.26.3/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM=
modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
modernc.org/libc v1.66.6 h1:RyQpwAhM/19nXD8y3iejM/AjmKwY2TjxZTlUWTsWw2U=
modernc.org/libc v1.66.6/go.mod h1:j8z0EYAuumoMQ3+cWXtmw6m+LYn3qm8dcZDFtFTSq+M=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

420
src/hooks/initial.go Normal file
View File

@@ -0,0 +1,420 @@
package hooks
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/fs"
"log"
"log/slog"
"os"
"os/signal"
"path/filepath"
"reflect"
"slices"
"strings"
"syscall"
"time"
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/logs"
"gopkg.in/ini.v1"
)
// The config composer needs to be in the global scope
var Compositor *config.Compositor = config.NewCompositor()
func InitGlobalLoggerHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
x.Config = Compositor
x.Log.SetOutput(os.Stdout)
x.Log.SetPrefix(colors.SetBrightBlack(fmt.Sprintf("(%s) ", cs.Stage)))
x.Log.SetFlags(log.Ldate | log.Ltime)
}
// First stage: pre-init
func InitCorestateHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
*cs = *corestate.NewCorestate(&corestate.CoreState{
UUID32DirName: "uuid",
NodeBinName: filepath.Base(os.Args[0]),
NodeVersion: config.NodeVersion,
MetaDir: "./.meta",
Stage: corestate.StagePreInit,
StartTimestampUnix: time.Now().Unix(),
})
}
func InitConfigLoadHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
x.Log.SetPrefix(colors.SetYellow(fmt.Sprintf("(%s) ", cs.Stage)))
if err := x.Config.LoadEnv(); err != nil {
x.Log.Fatalf("env load error: %s", err)
}
cs.NodePath = *x.Config.Env.NodePath
if cfgPath := x.Config.CMDLine.Run.ConfigPath; cfgPath != "" {
x.Config.Env.ConfigPath = &cfgPath
}
if err := x.Config.LoadConf(*x.Config.Env.ConfigPath); err != nil {
x.Log.Fatalf("conf load error: %s", err)
}
}
// The hook reads or prepares a persistent uuid for the node
func InitUUIDHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
uuid32, err := corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
if errors.Is(err, fs.ErrNotExist) {
if err := corestate.SetNodeUUID(filepath.Join(cs.NodePath, cs.MetaDir, cs.UUID32DirName)); err != nil {
x.Log.Fatalf("Cannod generate node uuid: %s", err.Error())
}
uuid32, err = corestate.GetNodeUUID(filepath.Join(cs.MetaDir, "uuid"))
if err != nil {
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
}
if err != nil {
x.Log.Fatalf("uuid load error: %s", err)
}
cs.UUID32 = uuid32
corestate.NODE_UUID = uuid32
}
// The hook is responsible for checking the initialization stage
// and restarting in some cases
func InitRuntimeHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
if *x.Config.Env.ParentStagePID != os.Getpid() {
// still pre-init stage
runDir, err := run_manager.Create(cs.UUID32)
if err != nil {
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
cs.RunDir = runDir
input, err := os.Open(os.Args[0])
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if err := run_manager.Set(cs.NodeBinName); err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
fmgr := run_manager.File(cs.NodeBinName)
output, err := fmgr.Open()
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if _, err := io.Copy(output, input); err != nil {
fmgr.Close()
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if err := os.Chmod(filepath.Join(cs.RunDir, cs.NodeBinName), 0755); err != nil {
fmgr.Close()
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
input.Close()
fmgr.Close()
runArgs := os.Args
runArgs[0] = filepath.Join(cs.RunDir, cs.NodeBinName)
// prepare environ
env := utils.SetEviron(os.Environ(), fmt.Sprintf("GS_PARENT_PID=%d", os.Getpid()))
if err := syscall.Exec(runArgs[0], runArgs, env); err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
}
x.Log.Printf("Node uuid is %s", cs.UUID32)
}
// post-init stage
// The hook creates a run.lock file, which contains information
// about the process and the node, in the runtime directory.
func InitRunlockHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
NodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
x.Log.Println("Cleaning up...")
if err := run_manager.Clean(); err != nil {
x.Log.Printf("%s: Cleanup error: %s", colors.PrintError(), err.Error())
}
x.Log.Println("bye!")
})
cs.Stage = corestate.StagePostInit
x.Log.SetPrefix(colors.SetBlue(fmt.Sprintf("(%s) ", cs.Stage)))
cs.RunDir = run_manager.Toggle()
exist, err := utils.ExistsMatchingDirs(filepath.Join(os.TempDir(), fmt.Sprintf("/*-%s-%s", cs.UUID32, "gosally-runtime")), cs.RunDir)
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
if exist {
_ = run_manager.Clean()
x.Log.Fatalf("Unable to continue node operation: A node with the same identifier was found in the runtime environment")
}
if err := run_manager.Set("run.lock"); err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
lockPath, err := run_manager.Get("run.lock")
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
lockFile := ini.Empty()
secRun, err := lockFile.NewSection("runtime")
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
secRun.Key("pid").SetValue(fmt.Sprintf("%d/%d", os.Getpid(), x.Config.Env.ParentStagePID))
secRun.Key("version").SetValue(cs.NodeVersion)
secRun.Key("uuid").SetValue(cs.UUID32)
secRun.Key("timestamp").SetValue(time.Unix(cs.StartTimestampUnix, 0).Format("2006-01-02/15:04:05 MST"))
secRun.Key("timestamp-unix").SetValue(fmt.Sprintf("%d", cs.StartTimestampUnix))
err = lockFile.SaveTo(lockPath)
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
}
// The hook reads the configuration and replaces special expressions
// (%tmp% and so on) in string fields with the required data.
func InitConfigReplHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
if !slices.Contains(*x.Config.Conf.DisableWarnings, "--WNonStdTmpDir") && os.TempDir() != "/tmp" {
x.Log.Printf("%s: %s", colors.PrintWarn(), "Non-standard value specified for temporary directory")
}
replacements := map[string]any{
"%tmp%": filepath.Clean(run_manager.RuntimeDir()),
"%path%": *x.Config.Env.NodePath,
"%stdout%": "_1STDout",
"%stderr%": "_2STDerr",
"%1%": "_1STDout",
"%2%": "_2STDerr",
}
processConfig(&x.Config.Conf, replacements)
if !slices.Contains(logs.Levels.Available, *x.Config.Conf.Log.Level) {
if !slices.Contains(*x.Config.Conf.DisableWarnings, "--WUndefLogLevel") {
x.Log.Printf("%s: %s", colors.PrintWarn(), fmt.Sprintf("Unknown logging level %s, fallback level: %s", *x.Config.Conf.Log.Level, logs.Levels.Fallback))
}
x.Config.Conf.Log.Level = &logs.Levels.Fallback
}
}
// The hook is responsible for outputting the
// final config and asking for confirmation.
func InitConfigPrintHook(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
if *x.Config.Conf.Node.ShowConfig {
fmt.Printf("Configuration from %s:\n", x.Config.CMDLine.Run.ConfigPath)
x.Config.Print(x.Config.Conf)
fmt.Printf("Environment:\n")
x.Config.Print(x.Config.Env)
if cs.UUID32 != "" && !askConfirm("Is that ok?", true) {
x.Log.Printf("Cancel launch")
NodeApp.CallFallback(ctx)
}
}
if *x.Config.Conf.Node.Name == "noname" {
x.Log.Printf("Starting node")
} else {
x.Log.Printf("Starting \"%s\" node", *x.Config.Conf.Node.Name)
}
}
func InitSLogHook(_ context.Context, cs *corestate.CoreState, x *app.AppX) {
cs.Stage = corestate.StageReady
x.Log.SetPrefix(colors.SetGreen(fmt.Sprintf("(%s) ", cs.Stage)))
x.SLog = new(slog.Logger)
newSlog, err := logs.SetupLogger(x.Config.Conf.Log)
if err != nil {
_ = run_manager.Clean()
x.Log.Fatalf("Unexpected failure: %s", err.Error())
}
*x.SLog = *newSlog
}
// The function walks the entire config structure via reflection
// and replaces string fields with the required values.
func processConfig(conf any, replacements map[string]any) error {
val := reflect.ValueOf(conf)
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
switch val.Kind() {
case reflect.Struct:
for i := 0; i < val.NumField(); i++ {
field := val.Field(i)
if field.CanAddr() && field.CanSet() {
if err := processConfig(field.Addr().Interface(), replacements); err != nil {
return err
}
}
}
case reflect.Slice:
for i := 0; i < val.Len(); i++ {
elem := val.Index(i)
if elem.CanAddr() && elem.CanSet() {
if err := processConfig(elem.Addr().Interface(), replacements); err != nil {
return err
}
}
}
case reflect.Map:
for _, key := range val.MapKeys() {
elem := val.MapIndex(key)
if elem.CanInterface() {
newVal := reflect.New(elem.Type()).Elem()
newVal.Set(elem)
if err := processConfig(newVal.Addr().Interface(), replacements); err != nil {
return err
}
val.SetMapIndex(key, newVal)
}
}
case reflect.String:
str := val.String()
if replacement, exists := replacements[str]; exists {
if err := setValue(val, replacement); err != nil {
return fmt.Errorf("failed to set %q: %v", str, err)
}
} else {
for placeholder, replacement := range replacements {
if strings.Contains(str, placeholder) {
replacementStr, err := toString(replacement)
if err != nil {
return fmt.Errorf("invalid replacement for %q: %v", placeholder, err)
}
newStr := strings.ReplaceAll(str, placeholder, replacementStr)
val.SetString(newStr)
}
}
}
case reflect.Ptr:
if !val.IsNil() {
elem := val.Elem()
if elem.Kind() == reflect.String {
str := elem.String()
if replacement, exists := replacements[str]; exists {
strVal, err := toString(replacement)
if err != nil {
return fmt.Errorf("cannot convert replacement to string: %v", err)
}
elem.SetString(strVal)
} else {
for placeholder, replacement := range replacements {
if strings.Contains(str, placeholder) {
replacementStr, err := toString(replacement)
if err != nil {
return fmt.Errorf("invalid replacement for %q: %v", placeholder, err)
}
newStr := strings.ReplaceAll(str, placeholder, replacementStr)
elem.SetString(newStr)
}
}
}
} else {
return processConfig(elem.Addr().Interface(), replacements)
}
}
}
return nil
}
func setValue(val reflect.Value, replacement any) error {
if !val.CanSet() {
return fmt.Errorf("value is not settable")
}
replacementVal := reflect.ValueOf(replacement)
if replacementVal.Type().AssignableTo(val.Type()) {
val.Set(replacementVal)
return nil
}
if val.Kind() == reflect.String {
str, err := toString(replacement)
if err != nil {
return fmt.Errorf("cannot convert replacement to string: %v", err)
}
val.SetString(str)
return nil
}
return fmt.Errorf("type mismatch: cannot assign %T to %v", replacement, val.Type())
}
func toString(v any) (string, error) {
switch s := v.(type) {
case string:
return s, nil
case fmt.Stringer:
return s.String(), nil
default:
return fmt.Sprint(v), nil
}
}
func askConfirm(prompt string, defaultYes bool) bool {
ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
fmt.Print(prompt)
if defaultYes {
fmt.Printf(" (%s/%s): ", colors.SetBrightGreen("Y"), colors.SetBrightRed("n"))
} else {
fmt.Printf(" (%s/%s): ", colors.SetBrightGreen("n"), colors.SetBrightRed("Y"))
}
inputChan := make(chan string, 1)
go func() {
reader := bufio.NewReader(os.Stdin)
text, _ := reader.ReadString('\n')
inputChan <- text
}()
select {
case <-ctx.Done():
fmt.Println("")
NodeApp.CallFallback(ctx)
os.Exit(3)
case text := <-inputChan:
text = strings.TrimSpace(strings.ToLower(text))
if text == "" {
return defaultYes
}
if text == "y" || text == "yes" {
return true
}
return false
}
return defaultYes
}
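For reference, a standalone sketch of the run.lock layout that InitRunlockHook writes using gopkg.in/ini.v1; all values below are placeholders:

package main

import (
	"fmt"
	"os"
	"time"

	"gopkg.in/ini.v1"
)

func main() {
	lock := ini.Empty()
	sec, err := lock.NewSection("runtime")
	if err != nil {
		panic(err)
	}
	// Same keys InitRunlockHook fills in; the values here are placeholders.
	sec.Key("pid").SetValue(fmt.Sprintf("%d/%d", os.Getpid(), os.Getppid()))
	sec.Key("version").SetValue("v0.0.0-dev")
	sec.Key("uuid").SetValue("0123456789abcdef0123456789abcdef")
	sec.Key("timestamp").SetValue(time.Now().Format("2006-01-02/15:04:05 MST"))
	sec.Key("timestamp-unix").SetValue(fmt.Sprintf("%d", time.Now().Unix()))

	if err := lock.SaveTo("run.lock"); err != nil {
		panic(err)
	}
}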

185
src/hooks/run.go Normal file
View File

@@ -0,0 +1,185 @@
package hooks
import (
"context"
"errors"
"fmt"
"log"
"log/slog"
"net"
"net/http"
"regexp"
"time"
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/src/internal/core/update"
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/logs"
"github.com/akyaiy/GoSally-mvp/src/internal/server/gateway"
"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
"github.com/akyaiy/GoSally-mvp/src/internal/server/sv1"
"github.com/akyaiy/GoSally-mvp/src/internal/server/sv2"
"github.com/go-chi/chi/v5"
"github.com/go-chi/cors"
"github.com/spf13/cobra"
"golang.org/x/net/netutil"
)
var NodeApp = app.New()
var AllowedCmdPattern = `^[a-zA-Z0-9]+(\.[a-zA-Z0-9]+)*$`
func Run(cmd *cobra.Command, args []string) {
NodeApp.InitialHooks(
InitGlobalLoggerHook, InitCorestateHook, InitConfigLoadHook,
InitUUIDHook, InitRuntimeHook, InitRunlockHook,
InitConfigReplHook, InitConfigPrintHook, InitSLogHook,
)
NodeApp.Run(RunHook)
}
func RunHook(ctx context.Context, cs *corestate.CoreState, x *app.AppX) error {
ctxMain, cancelMain := context.WithCancel(ctx)
runLockFile := run_manager.File("run.lock")
_, err := runLockFile.Open()
if err != nil {
x.Log.Fatalf("cannot open run.lock: %s", err)
}
_, err = runLockFile.Watch(ctxMain, func() {
x.Log.Printf("run.lock was touched")
_ = run_manager.Clean()
cancelMain()
})
if err != nil {
x.Log.Printf("watch error: %s", err)
}
serverv1 := sv1.InitV1Server(&sv1.HandlerV1InitStruct{
X: x,
CS: cs,
AllowedCmd: regexp.MustCompile(AllowedCmdPattern),
Ver: "v1",
})
sv2 := sv2.InitServer(&sv2.HandlerInitStruct{
X: x,
CS: cs,
AllowedCmd: regexp.MustCompile(AllowedCmdPattern),
Ver: "v2",
})
session_manager := session.New(*x.Config.Conf.HTTPServer.SessionTTL)
s := gateway.InitGateway(&gateway.GatewayServerInit{
SM: session_manager,
CS: cs,
X: x,
}, serverv1, sv2)
r := chi.NewRouter()
r.Use(cors.Handler(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"POST"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token", "X-Session-UUID"},
AllowCredentials: true,
MaxAge: 300,
}))
r.HandleFunc(config.ComDirRoute, s.Handle)
r.Route("/favicon.ico", func(r chi.Router) {
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
})
})
srv := &http.Server{
Addr: *x.Config.Conf.HTTPServer.Address,
Handler: r,
ErrorLog: log.New(&logs.SlogWriter{
Logger: x.SLog,
Level: slog.LevelError,
}, "", 0),
}
NodeApp.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
if err := srv.Shutdown(ctxMain); err != nil {
x.Log.Printf("%s: Failed to stop the server gracefully: %s", colors.PrintError(), err.Error())
} else {
x.Log.Printf("Server stopped gracefully")
}
x.Log.Println("Cleaning up...")
if err := run_manager.Clean(); err != nil {
x.Log.Printf("%s: Cleanup error: %s", colors.PrintError(), err.Error())
}
x.Log.Println("bye!")
})
go func() {
defer utils.CatchPanicWithCancel(cancelMain)
if *x.Config.Conf.TLS.TlsEnabled {
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port))
if err != nil {
x.Log.Printf("%s: Failed to start TLS listener: %s", colors.PrintError(), err.Error())
cancelMain()
return
}
x.Log.Printf("Serving on %s port %s with TLS... (https://%s%s)", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
limitedListener := netutil.LimitListener(listener, 100)
if err := srv.ServeTLS(limitedListener, *x.Config.Conf.TLS.CertFile, *x.Config.Conf.TLS.KeyFile); err != nil && !errors.Is(err, http.ErrServerClosed) {
x.Log.Printf("%s: Failed to start HTTPS server: %s", colors.PrintError(), err.Error())
cancelMain()
}
} else {
x.Log.Printf("Serving on %s port %s... (http://%s%s)", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port, fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port), config.ComDirRoute)
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%s", *x.Config.Conf.HTTPServer.Address, *x.Config.Conf.HTTPServer.Port))
if err != nil {
x.Log.Printf("%s: Failed to start listener: %s", colors.PrintError(), err.Error())
cancelMain()
return
}
limitedListener := netutil.LimitListener(listener, 100)
if err := srv.Serve(limitedListener); err != nil && !errors.Is(err, http.ErrServerClosed) {
x.Log.Printf("%s: Failed to start HTTP server: %s", colors.PrintError(), err.Error())
cancelMain()
}
}
}()
session_manager.StartCleanup(5 * time.Second)
if *x.Config.Conf.Updates.UpdatesEnabled {
go func() {
defer utils.CatchPanicWithCancel(cancelMain)
updated := update.NewUpdater(&update.UpdaterInit{
X: x,
Ctx: ctxMain,
Cancel: cancelMain,
})
updated.Shutdownfunc(cancelMain)
for {
isNewUpdate, err := updated.CkeckUpdates()
if err != nil {
x.Log.Printf("Failed to check for updates: %s", err.Error())
}
if isNewUpdate {
if err := updated.Update(); err != nil {
x.Log.Printf("Failed to update: %s", err.Error())
} else {
x.Log.Printf("Update completed successfully")
}
}
time.Sleep(*x.Config.Conf.Updates.CheckInterval)
}
}()
}
<-ctxMain.Done()
NodeApp.CallFallback(ctx)
return nil
}

View File

@@ -1,4 +1,4 @@
package logs
package colors
import "fmt"

View File

@@ -1,5 +1,7 @@
package corestate
var NODE_UUID string
type Stage string
const (

View File

@@ -7,8 +7,8 @@ import (
"path/filepath"
"strings"
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
)
// GetNodeUUID reads and returns the node UUID from the file at the path given in the arguments.

View File

@@ -6,7 +6,7 @@ import (
"os"
"path/filepath"
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
)
type RunManagerContract interface {

View File

@@ -13,10 +13,10 @@ import (
"strings"
"syscall"
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
"golang.org/x/net/context"
)
@@ -134,7 +134,7 @@ func (u *Updater) GetCurrentVersion() (Version, Branch, error) {
}
func (u *Updater) GetLatestVersion(updateBranch Branch) (Version, Branch, error) {
repoURL := u.x.Config.Conf.Updates.RepositoryURL
repoURL := *u.x.Config.Conf.Updates.RepositoryURL
if repoURL == "" {
u.x.Log.Printf("Failed to get latest version: %s", "RepositoryURL is empty in config")
return "", "", errors.New("repository URL is empty")
@@ -192,7 +192,7 @@ func (u *Updater) CkeckUpdates() (IsNewUpdate, error) {
}
func (u *Updater) Update() error {
if !u.x.Config.Conf.Updates.UpdatesEnabled {
if !*u.x.Config.Conf.Updates.UpdatesEnabled {
return errors.New("updates are disabled in config, skipping update")
}
@@ -212,7 +212,7 @@ func (u *Updater) Update() error {
}
updateArchiveName := fmt.Sprintf("%s.v%s-%s", config.UpdateArchiveName, latestVersion, latestBranch)
updateDest := fmt.Sprintf("%s/%s.%s", u.x.Config.Conf.Updates.RepositoryURL, updateArchiveName, "tar.gz")
updateDest := fmt.Sprintf("%s/%s.%s", *u.x.Config.Conf.Updates.RepositoryURL, updateArchiveName, "tar.gz")
resp, err := http.Get(updateDest)
if err != nil {
@@ -278,7 +278,7 @@ func (u *Updater) Update() error {
func (u *Updater) InstallAndRestart() error {
nodePath := u.x.Config.Env.NodePath
nodePath := *u.x.Config.Env.NodePath
if nodePath == "" {
return errors.New("GS_NODE_PATH environment variable is not set")
}

View File

@@ -5,7 +5,7 @@ import (
"encoding/hex"
"errors"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
)
func NewUUIDRaw(length int) ([]byte, error) {

View File

@@ -9,12 +9,12 @@ import (
"sync"
"syscall"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
)
type AppContract interface {
InitialHooks(fn ...func(cs *corestate.CoreState, x *AppX))
InitialHooks(fn ...func(ctx context.Context, cs *corestate.CoreState, x *AppX))
Run(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX) error)
Fallback(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX))
@@ -22,7 +22,7 @@ type AppContract interface {
}
type App struct {
initHooks []func(cs *corestate.CoreState, x *AppX)
initHooks []func(ctx context.Context, cs *corestate.CoreState, x *AppX)
runHook func(ctx context.Context, cs *corestate.CoreState, x *AppX) error
fallback func(ctx context.Context, cs *corestate.CoreState, x *AppX)
@@ -47,7 +47,7 @@ func New() AppContract {
}
}
func (a *App) InitialHooks(fn ...func(cs *corestate.CoreState, x *AppX)) {
func (a *App) InitialHooks(fn ...func(ctx context.Context, cs *corestate.CoreState, x *AppX)) {
a.initHooks = append(a.initHooks, fn...)
}
@@ -58,13 +58,13 @@ func (a *App) Fallback(fn func(ctx context.Context, cs *corestate.CoreState, x *
func (a *App) Run(fn func(ctx context.Context, cs *corestate.CoreState, x *AppX) error) {
a.runHook = fn
for _, hook := range a.initHooks {
hook(a.Corestate, a.AppX)
}
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
defer stop()
for _, hook := range a.initHooks {
hook(ctx, a.Corestate, a.AppX)
}
defer func() {
if r := recover(); r != nil {
a.AppX.Log.Printf("PANIC recovered: %v", r)
@@ -90,5 +90,6 @@ func (a *App) CallFallback(ctx context.Context) {
if a.fallback != nil {
a.fallback(ctx, a.Corestate, a.AppX)
}
os.Exit(0)
})
}
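A minimal sketch of the updated hook lifecycle: init hooks now receive the signal-aware context created inside Run, and the fallback runs on shutdown. Import paths follow the module rename to .../src shown above:

package main

import (
	"context"
	"fmt"

	"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
	"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
)

func main() {
	node := app.New()

	// Init hooks now take the context that is cancelled on SIGINT/SIGTERM.
	node.InitialHooks(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
		fmt.Println("init hook running")
	})
	node.Fallback(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) {
		fmt.Println("cleanup on shutdown")
	})
	node.Run(func(ctx context.Context, cs *corestate.CoreState, x *app.AppX) error {
		<-ctx.Done() // block until a termination signal arrives
		return nil
	})
}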

View File

@@ -43,10 +43,13 @@ func (c *Compositor) LoadConf(path string) error {
v.SetConfigType("yaml")
// defaults
v.SetDefault("mode", "dev")
v.SetDefault("com_dir", "./com/")
v.SetDefault("node.name", "noname")
v.SetDefault("node.mode", "dev")
v.SetDefault("node.show_config", "false")
v.SetDefault("node.com_dir", "./com/")
v.SetDefault("http_server.address", "0.0.0.0")
v.SetDefault("http_server.port", "8080")
v.SetDefault("http_server.session_ttl", "30m")
v.SetDefault("http_server.timeout", "5s")
v.SetDefault("http_server.idle_timeout", "60s")
v.SetDefault("tls.enabled", false)
@@ -55,8 +58,10 @@ func (c *Compositor) LoadConf(path string) error {
v.SetDefault("updates.enabled", false)
v.SetDefault("updates.check_interval", "2h")
v.SetDefault("updates.wanted_version", "latest-stable")
v.SetDefault("log.json_format", "false")
v.SetDefault("log.level", "info")
v.SetDefault("log.out_path", "")
v.SetDefault("log.output", "%2%")
v.SetDefault("disable_warnings", []string{})
if err := v.ReadInConfig(); err != nil {
return fmt.Errorf("error reading config: %w", err)

View File

@@ -0,0 +1,82 @@
// Package config provides configuration management for the application.
// config is built on top of the third-party module spf13/viper
package config
import (
"time"
)
type CompositorContract interface {
LoadEnv() error
LoadConf(path string) error
}
type Compositor struct {
CMDLine *CMDLine
Conf *Conf
Env *Env
}
type Conf struct {
Node *Node `mapstructure:"node"`
HTTPServer *HTTPServer `mapstructure:"http_server"`
TLS *TLS `mapstructure:"tls"`
Updates *Updates `mapstructure:"updates"`
Log *Log `mapstructure:"log"`
DisableWarnings *[]string `mapstructure:"disable_warnings"`
}
type Node struct {
Mode *string `mapstructure:"mode"`
Name *string `mapstructure:"name"`
ShowConfig *bool `mapstructure:"show_config"`
ComDir *string `mapstructure:"com_dir"`
}
type HTTPServer struct {
Address *string `mapstructure:"address"`
Port *string `mapstructure:"port"`
SessionTTL *time.Duration `mapstructure:"session_ttl"`
Timeout *time.Duration `mapstructure:"timeout"`
IdleTimeout *time.Duration `mapstructure:"idle_timeout"`
}
type TLS struct {
TlsEnabled *bool `mapstructure:"enabled"`
CertFile *string `mapstructure:"cert_file"`
KeyFile *string `mapstructure:"key_file"`
}
type Updates struct {
UpdatesEnabled *bool `mapstructure:"enabled"`
CheckInterval *time.Duration `mapstructure:"check_interval"`
RepositoryURL *string `mapstructure:"repository_url"`
WantedVersion *string `mapstructure:"wanted_version"`
}
type Log struct {
JSON *bool `mapstructure:"json_format"`
Level *string `mapstructure:"level"`
OutPath *string `mapstructure:"output"`
}
// Env holds configuration values read from environment variables
type Env struct {
ConfigPath *string `mapstructure:"config_path"`
NodePath *string `mapstructure:"node_path"`
ParentStagePID *int `mapstructure:"parent_pid"`
}
type CMDLine struct {
Run Run
Node Root
}
type Root struct {
Debug bool `persistent:"true" full:"debug" short:"d" def:"false" desc:"Set debug mode"`
}
type Run struct {
ConfigPath string `persistent:"true" full:"config" short:"c" def:"./config.yaml" desc:"Path to configuration file"`
Test []int `persistent:"true" full:"test" short:"t" def:"" desc:"js test"`
}
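A sketch of a config.yaml matching the mapstructure tags above, decoded the same way the compositor does (viper with the YAML config type); the values are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

// sample mirrors the keys declared by the mapstructure tags and defaults above.
const sample = `
node:
  name: demo
  mode: dev
  show_config: false
  com_dir: ./com/
http_server:
  address: 0.0.0.0
  port: "8080"
  session_ttl: 30m
tls:
  enabled: false
updates:
  enabled: false
  check_interval: 2h
log:
  json_format: false
  level: info
  output: "%2%"
`

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(strings.NewReader(sample)); err != nil {
		panic(err)
	}
	fmt.Println(v.GetString("node.name"), v.GetDuration("http_server.session_ttl"))
}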

View File

@@ -2,6 +2,8 @@ package config
import "os"
// TODO: Find a cleaner and clearer way to store global variables
// UUIDLength is the UUID length used for sessions. By default it is 16 bytes.
var UUIDLength int = 16

View File

@@ -0,0 +1,72 @@
package config
import (
"fmt"
"reflect"
"time"
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
)
func (c *Compositor) Print(v any) {
c.printConfig(v, " ")
}
func (c *Compositor) printConfig(v any, prefix string) {
val := reflect.ValueOf(v)
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < val.NumField(); i++ {
field := val.Field(i)
fieldType := typ.Field(i)
fieldName := fieldType.Name
if tag, ok := fieldType.Tag.Lookup("mapstructure"); ok {
if tag != "" {
fieldName = tag
}
}
coloredFieldName := colors.SetBrightCyan(fieldName)
if field.Kind() == reflect.Ptr {
if field.IsNil() {
fmt.Printf("%s%s: %s\n", prefix, coloredFieldName, colors.SetBrightRed("<nil>"))
continue
}
field = field.Elem()
}
if field.Kind() == reflect.Struct {
if field.Type() == reflect.TypeOf(time.Duration(0)) {
duration := field.Interface().(time.Duration)
fmt.Printf("%s%s: %s\n",
prefix,
coloredFieldName,
colors.SetBrightYellow(duration.String()))
} else {
fmt.Printf("%s%s:\n", prefix, coloredFieldName)
c.printConfig(field.Addr().Interface(), prefix+" ")
}
} else if field.Kind() == reflect.Slice {
fmt.Printf("%s%s: %s\n",
prefix,
coloredFieldName,
colors.SetBrightYellow(fmt.Sprintf("%v", field.Interface())))
} else {
value := field.Interface()
valueStr := fmt.Sprintf("%v", value)
if field.Kind() == reflect.String {
valueStr = fmt.Sprintf("\"%s\"", value)
}
fmt.Printf("%s%s: %s\n",
prefix,
coloredFieldName,
colors.SetBrightYellow(valueStr))
}
}
}

View File

@@ -10,16 +10,25 @@ import (
"log/slog"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/akyaiy/GoSally-mvp/internal/core/run_manager"
"github.com/akyaiy/GoSally-mvp/internal/engine/config"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
"gopkg.in/natefinch/lumberjack.v2"
)
var GlobalLevel slog.Level
type levelsStruct struct {
Available []string
Fallback string
}
var Levels = levelsStruct{
Available: []string{
"debug", "info",
},
Fallback: "info",
}
type SlogWriter struct {
Logger *slog.Logger
Level slog.Level
@@ -32,11 +41,11 @@ func (w *SlogWriter) Write(p []byte) (n int, err error) {
}
// SetupLogger initializes and returns a logger based on the provided log configuration.
func SetupLogger(o config.Log) (*slog.Logger, error) {
func SetupLogger(o *config.Log) (*slog.Logger, error) {
var handlerOpts = slog.HandlerOptions{}
var writer io.Writer = os.Stdout
switch o.Level {
switch *o.Level {
case "debug":
GlobalLevel = slog.LevelDebug
handlerOpts.Level = slog.LevelDebug
@@ -48,32 +57,14 @@ func SetupLogger(o config.Log) (*slog.Logger, error) {
handlerOpts.Level = slog.LevelInfo
}
if o.OutPath != "" {
repl := map[string]string{
"tmp": filepath.Clean(run_manager.RuntimeDir()),
}
re := regexp.MustCompile(`%(\w+)%`)
result := re.ReplaceAllStringFunc(o.OutPath, func(match string) string {
sub := re.FindStringSubmatch(match)
if len(sub) < 2 {
return match
}
key := sub[1]
if val, ok := repl[key]; ok {
return val
}
return match
})
if strings.Contains(o.OutPath, "%tmp%") {
relPath := strings.TrimPrefix(result, filepath.Clean(run_manager.RuntimeDir()))
if err := run_manager.SetDir(relPath); err != nil {
return nil, err
}
}
switch *o.OutPath {
case "_1STDout":
writer = os.Stdout
case "_2STDerr":
writer = os.Stderr
default:
logFile := &lumberjack.Logger{
Filename: filepath.Join(result, "event.log"),
Filename: filepath.Join(*o.OutPath, "event.log"),
MaxSize: 10,
MaxBackups: 5,
MaxAge: 28,
@@ -82,6 +73,13 @@ func SetupLogger(o config.Log) (*slog.Logger, error) {
writer = logFile
}
log := slog.New(slog.NewJSONHandler(writer, &handlerOpts))
var handler slog.Handler
if *o.JSON {
handler = slog.NewJSONHandler(writer, &handlerOpts)
} else {
handler = slog.NewTextHandler(writer, &handlerOpts)
}
log := slog.New(handler)
return log, nil
}
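A minimal sketch of calling the reworked SetupLogger with the sentinel output values that the placeholder pass produces ("%2%"/"%stderr%" become "_2STDerr"); import paths follow the src/ layout shown above:

package main

import (
	"log/slog"

	"github.com/akyaiy/GoSally-mvp/src/internal/engine/config"
	"github.com/akyaiy/GoSally-mvp/src/internal/engine/logs"
)

func main() {
	level := "debug"
	jsonFmt := false
	out := "_2STDerr" // what "%2%" or "%stderr%" resolves to after InitConfigReplHook

	logger, err := logs.SetupLogger(&config.Log{
		Level:   &level,
		JSON:    &jsonFmt,
		OutPath: &out,
	})
	if err != nil {
		panic(err)
	}
	logger.Info("text handler writing to stderr", slog.String("configured-level", level))
}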

View File

@@ -0,0 +1 @@
package lua

View File

@@ -0,0 +1,35 @@
package lua
import (
"sync"
lua "github.com/yuin/gopher-lua"
)
type LuaPool struct {
pool sync.Pool
}
func NewLuaPool() *LuaPool {
return &LuaPool{
pool: sync.Pool{
New: func() any {
L := lua.NewState()
return L
},
},
}
}
func (lp *LuaPool) Get() *lua.LState {
return lp.pool.Get().(*lua.LState)
}
func (lp *LuaPool) Put(L *lua.LState) {
L.Close()
newL := lua.NewState()
lp.pool.Put(newL)
}
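A minimal usage sketch for the pool; note that Put closes the returned state and pools a fresh one. The import path is a guess, since the diff does not show where package lua lives:

package main

import (
	"fmt"

	// Assumed location of the "lua" package introduced above.
	luapool "github.com/akyaiy/GoSally-mvp/src/internal/engine/lua"
)

func main() {
	pool := luapool.NewLuaPool()

	L := pool.Get()   // take a ready *lua.LState from the pool
	defer pool.Put(L) // closes this state and pools a brand-new one

	if err := L.DoString(`answer = 21 * 2`); err != nil {
		panic(err)
	}
	fmt.Println(L.GetGlobal("answer")) // 42
}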

View File

@@ -0,0 +1,25 @@
package lua
import (
"net/http"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)
type LuaEngineDeps struct {
HttpRequest *http.Request
JSONRPCRequest *rpc.RPCRequest
SessionUUID string
ScriptPath string
}
type LuaEngineContract interface {
Handle(deps *LuaEngineDeps) *rpc.RPCResponse
}
type LuaEngine struct {
x *app.AppX
cs *corestate.CoreState
}

View File

@@ -1,11 +1,13 @@
package gateway
import (
"context"
"net/http"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/internal/server/rpc"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
)
// serversApiVer is a string type used to represent API version strings in the GatewayServer.
@@ -13,7 +15,7 @@ type serversApiVer string
type ServerApiContract interface {
GetVersion() string
Handle(r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse
Handle(ctx context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse
}
// GatewayServer serves as a router that dispatches requests to the registered API version servers.
@@ -22,6 +24,7 @@ type GatewayServer struct {
// The key is the version string, and the value is the server implementing ServerApiContract
servers map[serversApiVer]ServerApiContract
sm *session.SessionManager
cs *corestate.CoreState
x *app.AppX
}

View File

@@ -3,12 +3,14 @@ package gateway
import (
"errors"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
)
// GatewayServerInit is a structure used only to initialize the gateway server.
type GatewayServerInit struct {
SM *session.SessionManager
CS *corestate.CoreState
X *app.AppX
}
@@ -17,6 +19,7 @@ type GatewayServerInit struct {
func InitGateway(o *GatewayServerInit, servers ...ServerApiContract) *GatewayServer {
general := &GatewayServer{
servers: make(map[serversApiVer]ServerApiContract),
sm: o.SM,
cs: o.CS,
x: o.X,
}

View File

@@ -1,29 +1,42 @@
package gateway
import (
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"sync"
"github.com/akyaiy/GoSally-mvp/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/internal/server/rpc"
"github.com/akyaiy/GoSally-mvp/src/internal/core/utils"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
"github.com/google/uuid"
)
func (gs *GatewayServer) Handle(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() // TODO
w.Header().Set("Content-Type", "application/json")
sessionUUID := r.Header.Get("X-Session-UUID")
if sessionUUID == "" {
sessionUUID = uuid.New().String()
}
gs.x.SLog.Debug("new request", slog.String("session-uuid", sessionUUID), slog.Group("connection", slog.String("ip", r.RemoteAddr)))
w.Header().Set("X-Session-UUID", sessionUUID)
if !gs.sm.Add(sessionUUID) {
gs.x.SLog.Debug("session is busy", slog.String("session-uuid", sessionUUID))
rpc.WriteError(w, rpc.NewError(rpc.ErrSessionIsBusy, rpc.ErrSessionIsBusyS, nil, nil))
return
}
defer gs.sm.Delete(sessionUUID)
body, err := io.ReadAll(r.Body)
if err != nil {
gs.x.SLog.Debug("failed to read body", slog.String("err", err.Error()))
w.WriteHeader(http.StatusBadRequest)
rpc.WriteError(w, &rpc.RPCResponse{
JSONRPC: rpc.JSONRPCVersion,
ID: nil,
Error: map[string]any{
"code": rpc.ErrInternalError,
"message": rpc.ErrInternalErrorS,
},
})
rpc.WriteError(w, rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, nil))
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInternalErrorS))
return
}
@@ -34,19 +47,17 @@ func (gs *GatewayServer) Handle(w http.ResponseWriter, r *http.Request) {
var single rpc.RPCRequest
if batch == nil {
if err := json.Unmarshal(body, &single); err != nil {
gs.x.SLog.Debug("failed to parse json", slog.String("err", err.Error()))
w.WriteHeader(http.StatusBadRequest)
rpc.WriteError(w, &rpc.RPCResponse{
JSONRPC: rpc.JSONRPCVersion,
ID: nil,
Error: map[string]any{
"code": rpc.ErrParseError,
"message": rpc.ErrParseErrorS,
},
})
rpc.WriteError(w, rpc.NewError(rpc.ErrParseError, rpc.ErrParseErrorS, nil, nil))
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrParseErrorS))
return
}
resp := gs.Route(r, &single)
resp := gs.Route(ctx, sessionUUID, r, &single)
if resp == nil {
w.Write([]byte(""))
return
}
rpc.WriteResponse(w, resp)
return
}
@@ -58,7 +69,7 @@ func (gs *GatewayServer) Handle(w http.ResponseWriter, r *http.Request) {
wg.Add(1)
go func(req rpc.RPCRequest) {
defer wg.Done()
res := gs.Route(r, &req)
res := gs.Route(ctx, sessionUUID, r, &req)
if res != nil {
responses <- *res
}
@@ -73,29 +84,31 @@ func (gs *GatewayServer) Handle(w http.ResponseWriter, r *http.Request) {
}
if len(result) > 0 {
json.NewEncoder(w).Encode(result)
} else {
w.Write([]byte("[]"))
}
}
func (gs *GatewayServer) Route(r *http.Request, req *rpc.RPCRequest) (resp *rpc.RPCResponse) {
func (gs *GatewayServer) Route(ctx context.Context, sid string, r *http.Request, req *rpc.RPCRequest) (resp *rpc.RPCResponse) {
defer utils.CatchPanicWithFallback(func(rec any) {
gs.x.SLog.Error("panic caught in handler", slog.Any("error", rec))
resp = rpc.NewError(rpc.ErrInternalError, "Internal server error (panic)", req.ID)
resp = rpc.NewError(rpc.ErrInternalError, "Internal server error (panic)", nil, req.ID)
})
if req.JSONRPC != rpc.JSONRPCVersion {
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInvalidRequestS), slog.String("requested-version", req.JSONRPC))
return rpc.NewError(rpc.ErrInvalidRequest, rpc.ErrInvalidRequestS, req.ID)
return rpc.NewError(rpc.ErrInvalidRequest, rpc.ErrInvalidRequestS, nil, req.ID)
}
server, ok := gs.servers[serversApiVer(req.ContextVersion)]
if !ok {
gs.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrContextVersionS), slog.String("requested-version", req.ContextVersion))
return rpc.NewError(rpc.ErrContextVersion, rpc.ErrContextVersionS, req.ID)
return rpc.NewError(rpc.ErrContextVersion, rpc.ErrContextVersionS, nil, req.ID)
}
resp = server.Handle(r, req)
// A request without an ID is a notification: handle it in the background and return no response
if req.ID == nil {
go server.Handle(ctx, sid, r, req)
return nil
}
return resp
return server.Handle(ctx, sid, r, req)
}
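A client-side sketch of talking to this gateway: JSON-RPC 2.0 over POST with the optional X-Session-UUID header. The route path, the JSON key carrying the context version, and the method name are placeholders; check config.ComDirRoute and the rpc.RPCRequest JSON tags for the real values:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "context" as the version key, "/api" as the route and "list" as the method
	// are all placeholders for this sketch.
	body := []byte(`{"jsonrpc":"2.0","context":"v1","method":"list","params":{},"id":1}`)

	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:8080/api", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Session-UUID", "11111111-2222-3333-4444-555555555555") // reuse to keep one logical session

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Header.Get("X-Session-UUID"), string(out))
}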

View File

@@ -13,8 +13,16 @@ type RPCRequest struct {
type RPCResponse struct {
JSONRPC string `json:"jsonrpc"`
ID *json.RawMessage `json:"id"`
Result any `json:"result,omitempty"`
Error any `json:"error,omitempty"`
Result any `json:"result,omitzero"`
Error any `json:"error,omitzero"`
Data *RPCData `json:"data,omitzero"`
}
type RPCData struct {
ResponsibleNode string `json:"responsible-node,omitempty"`
Salt string `json:"salt,omitempty"`
Checksum string `json:"checksum-md5,omitempty"`
NewSessionUUID string `json:"new-session-uuid,omitempty"`
}
const (

View File

@@ -24,4 +24,7 @@ const (
ErrMethodIsMissing = -32020
ErrMethodIsMissingS = "Method is missing"
ErrSessionIsBusy = -32030
ErrSessionIsBusyS = "The session is busy"
)

View File

@@ -0,0 +1,60 @@
package rpc
import (
"crypto/md5"
"encoding/json"
"fmt"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/google/uuid"
)
func generateChecksum(result any) string {
if result == nil {
return ""
}
data, err := json.Marshal(result)
if err != nil {
return ""
}
return fmt.Sprintf("%x", md5.Sum(data))
}
func generateSalt() string {
return uuid.NewString()
}
func GetData(data any) *RPCData {
return &RPCData{
Salt: generateSalt(),
ResponsibleNode: corestate.NODE_UUID,
Checksum: generateChecksum(data),
}
}
func NewError(code int, message string, data any, id *json.RawMessage) *RPCResponse {
Error := map[string]any{
"code": code,
"message": message,
}
if data != nil {
Error["data"] = data
}
return &RPCResponse{
JSONRPC: JSONRPCVersion,
ID: id,
Error: Error,
Data: GetData(Error),
}
}
func NewResponse(result any, id *json.RawMessage) *RPCResponse {
return &RPCResponse{
JSONRPC: JSONRPCVersion,
ID: id,
Result: result,
Data: GetData(result),
}
}
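A small sketch of the envelope NewResponse produces: the result plus a data block carrying a salt, the responsible node UUID (empty unless corestate.NODE_UUID has been set) and an MD5 checksum of the result:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)

func main() {
	id := json.RawMessage(`1`)
	resp := rpc.NewResponse(map[string]any{"status": "ok"}, &id)

	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints jsonrpc/id/result plus "data" with salt, responsible-node and checksum-md5.
	fmt.Println(string(out))
}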

View File

@@ -0,0 +1,47 @@
package session
import (
"sync"
"time"
)
type SessionManagerContract interface {
Add(uuid string) bool
Delete(uuid string)
StartCleanup(interval time.Duration)
}
type SessionManager struct {
sessions sync.Map
ttl time.Duration
}
func New(ttl time.Duration) *SessionManager {
return &SessionManager{
ttl: ttl,
}
}
func (sm *SessionManager) Add(uuid string) bool {
_, loaded := sm.sessions.LoadOrStore(uuid, time.Now().Add(sm.ttl))
return !loaded
}
func (sm *SessionManager) Delete(uuid string) {
sm.sessions.Delete(uuid)
}
func (sm *SessionManager) StartCleanup(interval time.Duration) {
go func() {
ticker := time.NewTicker(interval)
for range ticker.C {
sm.sessions.Range(func(key, value any) bool {
expiry := value.(time.Time)
if time.Now().After(expiry) {
sm.sessions.Delete(key)
}
return true
})
}
}()
}
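Minimal usage sketch: Add succeeds only while the UUID is not already held, Delete releases it, and StartCleanup evicts entries whose TTL has expired:

package main

import (
	"fmt"
	"time"

	"github.com/akyaiy/GoSally-mvp/src/internal/server/session"
)

func main() {
	sm := session.New(30 * time.Second)
	sm.StartCleanup(5 * time.Second)

	fmt.Println(sm.Add("sid-1")) // true: session acquired
	fmt.Println(sm.Add("sid-1")) // false: busy until Delete or TTL cleanup
	sm.Delete("sid-1")
	fmt.Println(sm.Add("sid-1")) // true again
}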

View File

@@ -0,0 +1,415 @@
package sv1
import (
"database/sql"
"fmt"
"log/slog"
"sync"
lua "github.com/yuin/gopher-lua"
)
type DBConnection struct {
dbPath string
log bool
logger *slog.Logger
writeChan chan *dbWriteRequest
closeChan chan struct{}
}
type dbWriteRequest struct {
query string
args []interface{}
resCh chan *dbWriteResult
}
type dbWriteResult struct {
rowsAffected int64
err error
}
var dbMutexMap = make(map[string]*sync.RWMutex)
var dbGlobalMutex sync.Mutex
func getDBMutex(dbPath string) *sync.RWMutex {
dbGlobalMutex.Lock()
defer dbGlobalMutex.Unlock()
if mtx, ok := dbMutexMap[dbPath]; ok {
return mtx
}
mtx := &sync.RWMutex{}
dbMutexMap[dbPath] = mtx
return mtx
}
func loadDBMod(llog *slog.Logger, sid string) func(*lua.LState) int {
return func(L *lua.LState) int {
llog.Debug("import module db-sqlite")
dbMod := L.NewTable()
L.SetField(dbMod, "connect", L.NewFunction(func(L *lua.LState) int {
dbPath := L.CheckString(1)
logQueries := false
if L.GetTop() >= 2 {
opts := L.CheckTable(2)
if val := opts.RawGetString("log"); val != lua.LNil {
logQueries = lua.LVAsBool(val)
}
}
conn := &DBConnection{
dbPath: dbPath,
log: logQueries,
logger: llog,
writeChan: make(chan *dbWriteRequest, 100),
closeChan: make(chan struct{}),
}
go conn.processWrites()
ud := L.NewUserData()
ud.Value = conn
L.SetMetatable(ud, L.GetTypeMetatable("gosally_db"))
L.Push(ud)
return 1
}))
mt := L.NewTypeMetatable("gosally_db")
L.SetField(mt, "__index", L.SetFuncs(L.NewTable(), map[string]lua.LGFunction{
"exec": dbExec,
"query": dbQuery,
"query_row": dbQueryRow,
"close": dbClose,
}))
L.SetField(dbMod, "__seed", lua.LString(sid))
L.Push(dbMod)
return 1
}
}
func (conn *DBConnection) processWrites() {
for {
select {
case req := <-conn.writeChan:
mtx := getDBMutex(conn.dbPath)
mtx.Lock()
db, err := sql.Open("sqlite", conn.dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_sync=NORMAL&_cache_size=-10000")
if err == nil {
_, err = db.Exec("PRAGMA journal_mode=WAL;")
if err == nil {
res, execErr := db.Exec(req.query, req.args...)
if execErr == nil {
rows, _ := res.RowsAffected()
req.resCh <- &dbWriteResult{rowsAffected: rows}
} else {
req.resCh <- &dbWriteResult{err: execErr}
}
}
db.Close()
}
if err != nil {
req.resCh <- &dbWriteResult{err: err}
}
mtx.Unlock()
case <-conn.closeChan:
return
}
}
}
func dbExec(L *lua.LState) int {
ud := L.CheckUserData(1)
conn, ok := ud.Value.(*DBConnection)
if !ok {
L.Push(lua.LNil)
L.Push(lua.LString("invalid database connection"))
return 2
}
query := L.CheckString(2)
var args []any
if L.GetTop() >= 3 {
params := L.CheckTable(3)
params.ForEach(func(k lua.LValue, v lua.LValue) {
args = append(args, ConvertLuaTypesToGolang(v))
})
}
if conn.log {
conn.logger.Info("DB Exec",
slog.String("query", query),
slog.Any("params", args))
}
resCh := make(chan *dbWriteResult, 1)
conn.writeChan <- &dbWriteRequest{
query: query,
args: args,
resCh: resCh,
}
ctx := L.NewTable()
L.SetField(ctx, "done", lua.LBool(false))
var result lua.LValue = lua.LNil
var errorMsg lua.LValue = lua.LNil
L.SetField(ctx, "wait", L.NewFunction(func(L *lua.LState) int {
res := <-resCh
L.SetField(ctx, "done", lua.LBool(true))
if res.err != nil {
errorMsg = lua.LString(res.err.Error())
result = lua.LNil
} else {
result = lua.LNumber(res.rowsAffected)
errorMsg = lua.LNil
}
if res.err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(res.err.Error()))
return 2
}
L.Push(lua.LNumber(res.rowsAffected))
L.Push(lua.LNil)
return 2
}))
L.SetField(ctx, "check", L.NewFunction(func(L *lua.LState) int {
select {
case res := <-resCh:
L.SetField(ctx, "done", lua.LBool(true))
if res.err != nil {
errorMsg = lua.LString(res.err.Error())
result = lua.LNil
L.Push(lua.LNil)
L.Push(lua.LString(res.err.Error()))
return 2
} else {
result = lua.LNumber(res.rowsAffected)
errorMsg = lua.LNil
L.Push(lua.LNumber(res.rowsAffected))
L.Push(lua.LNil)
return 2
}
default:
L.Push(result)
L.Push(errorMsg)
return 2
}
}))
L.Push(ctx)
L.Push(lua.LNil)
return 2
}
func dbQueryRow(L *lua.LState) int {
ud := L.CheckUserData(1)
conn, ok := ud.Value.(*DBConnection)
if !ok {
L.Push(lua.LNil)
L.Push(lua.LString("invalid database connection"))
return 2
}
query := L.CheckString(2)
var args []any
if L.GetTop() >= 3 {
params := L.CheckTable(3)
params.ForEach(func(k lua.LValue, v lua.LValue) {
args = append(args, ConvertLuaTypesToGolang(v))
})
}
if conn.log {
conn.logger.Info("DB QueryRow",
slog.String("query", query),
slog.Any("params", args))
}
mtx := getDBMutex(conn.dbPath)
mtx.RLock()
defer mtx.RUnlock()
db, err := sql.Open("sqlite", conn.dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_sync=NORMAL&_cache_size=-10000")
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
defer db.Close()
row := db.QueryRow(query, args...)
columns := []string{}
stmt, err := db.Prepare(query)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("prepare failed: %v", err)))
return 2
}
defer stmt.Close()
rows, err := stmt.Query(args...)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("query failed: %v", err)))
return 2
}
defer rows.Close()
cols, err := rows.Columns()
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("get columns failed: %v", err)))
return 2
}
for _, c := range cols {
columns = append(columns, c)
}
colCount := len(columns)
values := make([]any, colCount)
valuePtrs := make([]any, colCount)
for i := range columns {
valuePtrs[i] = &values[i]
}
err = row.Scan(valuePtrs...)
if err != nil {
if err == sql.ErrNoRows {
L.Push(lua.LNil)
return 1
}
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("scan failed: %v", err)))
return 2
}
rowTable := L.NewTable()
for i, col := range columns {
val := values[i]
if val == nil {
L.SetField(rowTable, col, lua.LNil)
} else {
L.SetField(rowTable, col, ConvertGolangTypesToLua(L, val))
}
}
L.Push(rowTable)
return 1
}
func dbQuery(L *lua.LState) int {
ud := L.CheckUserData(1)
conn, ok := ud.Value.(*DBConnection)
if !ok {
L.Push(lua.LNil)
L.Push(lua.LString("invalid database connection"))
return 2
}
query := L.CheckString(2)
var args []any
if L.GetTop() >= 3 {
params := L.CheckTable(3)
params.ForEach(func(k lua.LValue, v lua.LValue) {
args = append(args, ConvertLuaTypesToGolang(v))
})
}
if conn.log {
conn.logger.Info("DB Query",
slog.String("query", query),
slog.Any("params", args))
}
mtx := getDBMutex(conn.dbPath)
mtx.RLock()
defer mtx.RUnlock()
db, err := sql.Open("sqlite", conn.dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_sync=NORMAL&_cache_size=-10000")
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
defer db.Close()
rows, err := db.Query(query, args...)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("query failed: %v", err)))
return 2
}
defer rows.Close()
columns, err := rows.Columns()
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("get columns failed: %v", err)))
return 2
}
result := L.NewTable()
colCount := len(columns)
values := make([]any, colCount)
valuePtrs := make([]any, colCount)
for rows.Next() {
for i := range columns {
valuePtrs[i] = &values[i]
}
if err := rows.Scan(valuePtrs...); err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("scan failed: %v", err)))
return 2
}
rowTable := L.NewTable()
for i, col := range columns {
val := values[i]
if val == nil {
L.SetField(rowTable, col, lua.LNil)
} else {
L.SetField(rowTable, col, ConvertGolangTypesToLua(L, val))
}
}
result.Append(rowTable)
}
if err := rows.Err(); err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(fmt.Sprintf("rows iteration failed: %v", err)))
return 2
}
L.Push(result)
return 1
}
func dbClose(L *lua.LState) int {
ud := L.CheckUserData(1)
conn, ok := ud.Value.(*DBConnection)
if !ok {
L.Push(lua.LFalse)
L.Push(lua.LString("invalid database connection"))
return 2
}
close(conn.closeChan)
L.Push(lua.LTrue)
return 1
}
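A test-style sketch of the script-side API this module exposes. It would have to live inside package sv1, since loadDBMod is unexported, and the preload name "db" is an assumption (the debug log above calls the module db-sqlite); the schema is illustrative:

package sv1

import (
	"log/slog"
	"os"
	"testing"

	lua "github.com/yuin/gopher-lua"
)

func TestDBModuleSketch(t *testing.T) {
	L := lua.NewState()
	defer L.Close()

	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	L.PreloadModule("db", loadDBMod(logger, "session-uuid"))

	// Creates ./demo.sqlite in the working directory.
	script := `
		local db = require("db")
		local conn = db.connect("./demo.sqlite", { log = true })

		-- exec is asynchronous: it returns a ctx whose wait() blocks for the result
		local ctx = conn:exec("CREATE TABLE IF NOT EXISTS t (id INTEGER PRIMARY KEY, name TEXT)")
		local rows, err = ctx.wait()
		assert(err == nil, err)

		local all = conn:query("SELECT id, name FROM t")
		assert(all ~= nil)
		conn:close()
	`
	if err := L.DoString(script); err != nil {
		t.Fatal(err)
	}
}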

View File

@@ -1,28 +1,39 @@
package sv1
import (
"context"
"log/slog"
"net/http"
"github.com/akyaiy/GoSally-mvp/internal/server/rpc"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)
func (h *HandlerV1) Handle(r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse {
func (h *HandlerV1) Handle(_ context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse {
if req.Method == "" {
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrMethodNotFoundS), slog.String("requested-method", req.Method))
return rpc.NewError(rpc.ErrMethodIsMissing, rpc.ErrMethodIsMissingS, req.ID)
return rpc.NewError(rpc.ErrMethodIsMissing, rpc.ErrMethodIsMissingS, nil, req.ID)
}
method, err := h.resolveMethodPath(req.Method)
if err != nil {
if err.Error() == rpc.ErrInvalidMethodFormatS {
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInvalidMethodFormatS), slog.String("requested-method", req.Method))
return rpc.NewError(rpc.ErrInvalidMethodFormat, rpc.ErrInvalidMethodFormatS, req.ID)
return rpc.NewError(rpc.ErrInvalidMethodFormat, rpc.ErrInvalidMethodFormatS, nil, req.ID)
} else if err.Error() == rpc.ErrMethodNotFoundS {
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrMethodNotFoundS), slog.String("requested-method", req.Method))
return rpc.NewError(rpc.ErrMethodNotFound, rpc.ErrMethodNotFoundS, req.ID)
return rpc.NewError(rpc.ErrMethodNotFound, rpc.ErrMethodNotFoundS, nil, req.ID)
}
}
return h.handleLUA(method, req)
switch req.Params.(type) {
case map[string]any, []any, nil:
return h.handleLUA(sid, r, req, method)
default:
// JSON-RPC 2.0 Specification:
// https://www.jsonrpc.org/specification#parameter_structures
//
// "params" MUST be either an *array* or an *object* if included.
// Any other type (e.g., a number, string, or boolean) is INVALID.
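// For illustration (the method name is arbitrary):
//   valid:   {"jsonrpc":"2.0","method":"unit.list","params":{"limit":10},"id":1}
//   valid:   {"jsonrpc":"2.0","method":"unit.list","params":[10],"id":2}
//   invalid: {"jsonrpc":"2.0","method":"unit.list","params":10,"id":3}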
h.x.SLog.Info("invalid request received", slog.String("issue", rpc.ErrInvalidParamsS))
return rpc.NewError(rpc.ErrInvalidParams, rpc.ErrInvalidParamsS, nil, req.ID)
}
}

View File

@@ -0,0 +1,86 @@
package sv1
import (
"log/slog"
"time"
"github.com/golang-jwt/jwt/v5"
lua "github.com/yuin/gopher-lua"
)
func loadJWTMod(llog *slog.Logger, sid string) func(*lua.LState) int {
return func(L *lua.LState) int {
llog.Debug("import module jwt")
jwtMod := L.NewTable()
L.SetField(jwtMod, "encode", L.NewFunction(jwtEncode))
L.SetField(jwtMod, "decode", L.NewFunction(jwtDecode))
L.SetField(jwtMod, "__seed", lua.LString(sid))
L.Push(jwtMod)
return 1
}
}
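// jwtEncode signs an HS256 token from a single argument table of the form
// { secret = "...", payload = { ... }, expires_in = <seconds> }. The
// expiry defaults to one hour, and the "iat" and "exp" claims are always
// set. It returns the signed token string, or nil and an error message.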
func jwtEncode(L *lua.LState) int {
payloadTbl := L.CheckTable(1)
secret := L.GetField(payloadTbl, "secret").String()
payload, ok := L.GetField(payloadTbl, "payload").(*lua.LTable)
if !ok {
L.Push(lua.LNil)
L.Push(lua.LString("error: payload must be a table"))
return 2
}
expiresIn := L.GetField(payloadTbl, "expires_in")
expDuration := time.Hour
if expiresIn.Type() == lua.LTNumber {
floatVal := ConvertLuaTypesToGolang(expiresIn).(float64)
expDuration = time.Duration(floatVal) * time.Second
}
claims := jwt.MapClaims{}
payload.ForEach(func(key, value lua.LValue) {
claims[key.String()] = ConvertLuaTypesToGolang(value)
})
claims["iat"] = time.Now().Unix()
claims["exp"] = time.Now().Add(expDuration).Unix()
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
signedToken, err := token.SignedString([]byte(secret))
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
L.Push(lua.LString(signedToken))
return 1
}
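// jwtDecode parses and verifies a token using the secret from the optional
// second table argument. Note the return order is reversed compared to
// jwtEncode: on failure it returns an error string and nil, on success nil
// and a table with the token's claims.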
func jwtDecode(L *lua.LState) int {
tokenString := L.CheckString(1)
optsTbl := L.OptTable(2, L.NewTable())
secret := L.GetField(optsTbl, "secret").String()
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (any, error) {
return []byte(secret), nil
})
if err != nil || !token.Valid {
msg := "Invalid token"
if err != nil {
msg = msg + ": " + err.Error()
}
L.Push(lua.LString(msg))
L.Push(lua.LNil)
return 2
}
claims, ok := token.Claims.(jwt.MapClaims)
if !ok {
L.Push(lua.LString("Invalid claims"))
L.Push(lua.LNil)
return 2
}
luaTable := L.NewTable()
for k, v := range claims {
luaTable.RawSetString(k, ConvertGolangTypesToLua(L, v))
}
L.Push(lua.LNil)
L.Push(luaTable)
return 2
}

View File

@@ -0,0 +1,636 @@
package sv1
// TODO: make a lua state pool using sync.Pool
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"log/slog"
"math/rand/v2"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"golang.org/x/crypto/bcrypt"
"github.com/akyaiy/GoSally-mvp/src/internal/colors"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
lua "github.com/yuin/gopher-lua"
_ "modernc.org/sqlite"
)
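// addInitiatorHeaders copies metadata about the original caller onto an
// outgoing request so downstream services can see who triggered it: the
// client IP (preferring X-Forwarded-For), the session UUID, and the
// initiator's host, user agent and referer.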
func addInitiatorHeaders(sid string, req *http.Request, headers http.Header) {
clientIP := req.RemoteAddr
if forwardedFor := req.Header.Get("X-Forwarded-For"); forwardedFor != "" {
clientIP = forwardedFor
}
headers.Set("X-Initiator-IP", clientIP)
headers.Set("X-Session-UUID", sid)
headers.Set("X-Initiator-Host", req.Host)
headers.Set("X-Initiator-User-Agent", req.UserAgent())
headers.Set("X-Initiator-Referer", req.Referer())
}
// A small reminder: this code is only at the MVP stage, and some parts
// of it may shock you with the developer's incompetence. But, in the end,
// this code is just an idea. If you would like to contribute to its
// development, I will only be glad.
// TODO: make this huge function more harmonious by dividing responsibilities
func (h *HandlerV1) handleLUA(sid string, r *http.Request, req *rpc.RPCRequest, path string) *rpc.RPCResponse {
var __exit = -1
llog := h.x.SLog.With(slog.String("session-id", sid))
llog.Debug("handling LUA")
L := lua.NewState()
defer L.Close()
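// Sandbox the state: scripts must not be able to terminate the process or
// touch its stdio, so os.exit, the io read/write helpers, print and the
// metatables of the stdio userdata are stripped before any script runs.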
osMod := L.GetGlobal("os").(*lua.LTable)
L.SetField(osMod, "exit", lua.LNil)
ioMod := L.GetGlobal("io").(*lua.LTable)
for _, k := range []string{"write", "output", "flush", "read", "input"} {
ioMod.RawSetString(k, lua.LNil)
}
L.Env.RawSetString("print", lua.LNil)
for _, name := range []string{"stdout", "stderr", "stdin"} {
stream := ioMod.RawGetString(name)
if t, ok := stream.(*lua.LUserData); ok {
t.Metatable = lua.LNil
}
}
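// A per-request random seed is written into every preloaded module as
// "__seed"; after the script finishes it is compared against the value in
// package.loaded["internal.session"] to verify the script imported the
// stock session module rather than shadowing it with its own table.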
seed := rand.Int()
loadSessionMod := func(L *lua.LState) int {
llog.Debug("import module session", slog.String("script", path))
sessionMod := L.NewTable()
inTable := L.NewTable()
paramsTable := L.NewTable()
headersTable := L.NewTable()
fetchedHeadersTable := L.NewTable()
for k, v := range r.Header {
L.SetField(fetchedHeadersTable, k, ConvertGolangTypesToLua(L, v))
}
headersGetter := L.NewFunction(func(L *lua.LState) int {
path := L.OptString(1, "")
def := L.Get(2)
get := func(path string) lua.LValue {
if path == "" {
return fetchedHeadersTable
}
fetched := r.Header.Get(path)
if fetched == "" {
return lua.LNil
}
return lua.LString(fetched)
}
val := get(path)
if val == lua.LNil && def != lua.LNil {
L.Push(def)
} else {
L.Push(val)
}
return 1
})
L.SetField(headersTable, "__fetched", fetchedHeadersTable)
L.SetField(headersTable, "get", headersGetter)
L.SetField(inTable, "headers", headersTable)
fetchedParamsTable := L.NewTable()
switch params := req.Params.(type) {
case map[string]any:
for k, v := range params {
L.SetField(fetchedParamsTable, k, ConvertGolangTypesToLua(L, v))
}
case []any:
for i, v := range params {
fetchedParamsTable.RawSetInt(i+1, ConvertGolangTypesToLua(L, v))
}
}
paramsGetter := L.NewFunction(func(L *lua.LState) int {
path := L.OptString(1, "")
def := L.Get(2)
get := func(tbl *lua.LTable, path string) lua.LValue {
if path == "" {
return tbl
}
current := tbl
parts := strings.Split(path, ".")
size := len(parts)
for index, key := range parts {
val := current.RawGetString(key)
if tblVal, ok := val.(*lua.LTable); ok {
current = tblVal
} else {
if index == size-1 {
return val
}
return lua.LNil
}
}
return current
}
paramsTbl := L.GetField(paramsTable, "__fetched")
val := get(paramsTbl.(*lua.LTable), path)
if val == lua.LNil && def != lua.LNil {
L.Push(def)
} else {
L.Push(val)
}
return 1
})
L.SetField(paramsTable, "__fetched", fetchedParamsTable)
L.SetField(paramsTable, "get", paramsGetter)
L.SetField(inTable, "params", paramsTable)
outTable := L.NewTable()
scriptDataTable := L.NewTable()
L.SetField(outTable, "__script_data", scriptDataTable)
L.SetField(inTable, "address", lua.LString(r.RemoteAddr))
L.SetField(sessionMod, "throw_error", L.NewFunction(func(L *lua.LState) int {
arg := L.Get(1)
var msg string
switch arg.Type() {
case lua.LTString:
msg = arg.String()
case lua.LTNumber:
msg = strconv.FormatFloat(float64(arg.(lua.LNumber)), 'f', -1, 64)
default:
L.ArgError(1, "expected string or number")
return 0
}
L.RaiseError("%s", msg)
return 0
}))
resTable := L.NewTable()
L.SetField(scriptDataTable, "result", resTable)
L.SetField(outTable, "send", L.NewFunction(func(L *lua.LState) int {
res := L.Get(1)
resFTable := scriptDataTable.RawGetString("result")
if resPTable, ok := res.(*lua.LTable); ok {
resPTable.ForEach(func(key, value lua.LValue) {
L.SetField(resFTable, key.String(), value)
})
} else {
L.SetField(scriptDataTable, "result", res)
}
__exit = 0
L.RaiseError("__successfull")
return 0
}))
L.SetField(outTable, "set", L.NewFunction(func(L *lua.LState) int {
res := L.Get(1)
if res == lua.LNil {
return 0
}
resFTable := scriptDataTable.RawGetString("result")
if resPTable, ok := res.(*lua.LTable); ok {
resPTable.ForEach(func(key, value lua.LValue) {
L.SetField(resFTable, key.String(), value)
})
} else {
L.SetField(scriptDataTable, "result", res)
}
return 0
}))
errTable := L.NewTable()
L.SetField(scriptDataTable, "error", errTable)
L.SetField(outTable, "send_error", L.NewFunction(func(L *lua.LState) int {
var params [3]lua.LValue
for i := range 3 {
params[i] = L.Get(i + 1)
}
if errTable, ok := scriptDataTable.RawGetString("error").(*lua.LTable); ok {
for _, v := range params {
switch v.Type() {
case lua.LTNumber:
if n, ok := v.(lua.LNumber); ok {
L.SetField(errTable, "code", n)
}
case lua.LTString:
if s, ok := v.(lua.LString); ok {
L.SetField(errTable, "message", s)
}
case lua.LTTable:
if tbl, ok := v.(*lua.LTable); ok {
L.SetField(errTable, "data", tbl)
}
}
}
}
__exit = 1
L.RaiseError("__unsuccessfull")
return 0
}))
L.SetField(outTable, "set_error", L.NewFunction(func(L *lua.LState) int {
var params [3]lua.LValue
for i := range 3 {
params[i] = L.Get(i + 1)
}
if errTable, ok := scriptDataTable.RawGetString("error").(*lua.LTable); ok {
for _, v := range params {
switch v.Type() {
case lua.LTNumber:
if n, ok := v.(lua.LNumber); ok {
L.SetField(errTable, "code", n)
}
case lua.LTString:
if s, ok := v.(lua.LString); ok {
L.SetField(errTable, "message", s)
}
case lua.LTTable:
if tbl, ok := v.(*lua.LTable); ok {
L.SetField(errTable, "data", tbl)
}
}
}
}
return 0
}))
L.SetField(sessionMod, "request", inTable)
L.SetField(sessionMod, "response", outTable)
L.SetField(sessionMod, "id", lua.LString(sid))
L.SetField(sessionMod, "__seed", lua.LString(fmt.Sprint(seed)))
L.Push(sessionMod)
return 1
}
loadLogMod := func(L *lua.LState) int {
llog.Debug("import module log", slog.String("script", path))
logMod := L.NewTable()
logFuncs := map[string]func(string, ...any){
"info": llog.Info,
"debug": llog.Debug,
"error": llog.Error,
"warn": llog.Warn,
}
for name, logFunc := range logFuncs {
fun := logFunc
L.SetField(logMod, name, L.NewFunction(func(L *lua.LState) int {
msg := L.Get(1)
converted := ConvertLuaTypesToGolang(msg)
fun(fmt.Sprintf("the script says: %s", converted), slog.String("script", path))
return 0
}))
}
for _, fn := range []struct {
field string
pfunc func(string, ...any)
color func() string
}{
{"event", h.x.Log.Printf, nil},
{"event_error", h.x.Log.Printf, colors.PrintError},
{"event_warn", h.x.Log.Printf, colors.PrintWarn},
} {
L.SetField(logMod, fn.field, L.NewFunction(func(L *lua.LState) int {
msg := L.Get(1)
converted := ConvertLuaTypesToGolang(msg)
if fn.color != nil {
h.x.Log.Printf("%s: %s: %s", fn.color(), path, converted)
} else {
h.x.Log.Printf("%s: %s", path, converted)
}
return 0
}))
}
L.SetField(logMod, "__seed", lua.LString(fmt.Sprint(seed)))
L.Push(logMod)
return 1
}
loadNetMod := func(L *lua.LState) int {
llog.Debug("import module net", slog.String("script", path))
netMod := L.NewTable()
netModhttp := L.NewTable()
L.SetField(netModhttp, "get_request", L.NewFunction(func(L *lua.LState) int {
logRequest := L.ToBool(1)
url := L.ToString(2)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
addInitiatorHeaders(sid, r, req.Header)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
if logRequest {
llog.Info("HTTP GET request",
slog.String("script", path),
slog.String("url", url),
slog.Int("status", resp.StatusCode),
slog.String("status_text", resp.Status),
slog.String("initiator_ip", req.Header.Get("X-Initiator-IP")),
)
}
result := L.NewTable()
L.SetField(result, "status", lua.LNumber(resp.StatusCode))
L.SetField(result, "status_text", lua.LString(resp.Status))
L.SetField(result, "body", lua.LString(body))
L.SetField(result, "content_length", lua.LNumber(resp.ContentLength))
headers := L.NewTable()
for k, v := range resp.Header {
L.SetField(headers, k, ConvertGolangTypesToLua(L, v))
}
L.SetField(result, "headers", headers)
L.Push(result)
return 1
}))
L.SetField(netModhttp, "post_request", L.NewFunction(func(L *lua.LState) int {
logRequest := L.ToBool(1)
url := L.ToString(2)
contentType := L.ToString(3)
payload := L.ToString(4)
body := strings.NewReader(payload)
req, err := http.NewRequest("POST", url, body)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
req.Header.Set("Content-Type", contentType)
addInitiatorHeaders(sid, r, req.Header)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
defer resp.Body.Close()
respBody, err := io.ReadAll(resp.Body)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString(err.Error()))
return 2
}
if logRequest {
llog.Info("HTTP POST request",
slog.String("script", path),
slog.String("url", url),
slog.String("content_type", contentType),
slog.Int("status", resp.StatusCode),
slog.String("status_text", resp.Status),
slog.String("initiator_ip", req.Header.Get("X-Initiator-IP")),
)
}
result := L.NewTable()
L.SetField(result, "status", lua.LNumber(resp.StatusCode))
L.SetField(result, "status_text", lua.LString(resp.Status))
L.SetField(result, "body", lua.LString(respBody))
L.SetField(result, "content_length", lua.LNumber(resp.ContentLength))
headers := L.NewTable()
for k, v := range resp.Header {
L.SetField(headers, k, ConvertGolangTypesToLua(L, v))
}
L.SetField(result, "headers", headers)
L.Push(result)
return 1
}))
L.SetField(netMod, "http", netModhttp)
L.SetField(netMod, "__seed", lua.LString(fmt.Sprint(seed)))
L.Push(netMod)
return 1
}
loadCryptbcryptMod := func(L *lua.LState) int {
llog.Debug("import module crypt.bcrypt", slog.String("script", path))
bcryptMod := L.NewTable()
L.SetField(bcryptMod, "MinCost", lua.LNumber(bcrypt.MinCost))
L.SetField(bcryptMod, "MaxCost", lua.LNumber(bcrypt.MaxCost))
L.SetField(bcryptMod, "DefaultCost", lua.LNumber(bcrypt.DefaultCost))
L.SetField(bcryptMod, "generate", L.NewFunction(func(l *lua.LState) int {
password := ConvertLuaTypesToGolang(L.Get(1))
passwordStr, ok := password.(string)
if !ok {
L.Push(lua.LNil)
L.Push(lua.LString("error: password must be a string"))
return 2
}
cost := ConvertLuaTypesToGolang(L.Get(2))
costInt := bcrypt.DefaultCost
switch v := cost.(type) {
case int:
costInt = v
case float64:
costInt = int(v)
case nil:
// ok, use DefaultCost
default:
L.Push(lua.LNil)
L.Push(lua.LString("error: cost must be an integer"))
return 2
}
hashBytes, err := bcrypt.GenerateFromPassword([]byte(passwordStr), costInt)
if err != nil {
L.Push(lua.LNil)
L.Push(lua.LString("error: " + err.Error()))
return 2
}
L.Push(lua.LString(string(hashBytes)))
L.Push(lua.LNil)
return 2
}))
L.SetField(bcryptMod, "compare", L.NewFunction(func(l *lua.LState) int {
hash := ConvertLuaTypesToGolang(L.Get(1))
hashStr, ok := hash.(string)
if !ok {
L.Push(lua.LString("error: hash must be a string"))
return 1
}
password := ConvertLuaTypesToGolang(L.Get(2))
passwordStr, ok := password.(string)
if !ok {
L.Push(lua.LString("error: password must be a string"))
return 1
}
err := bcrypt.CompareHashAndPassword([]byte(hashStr), []byte(passwordStr))
if err != nil {
L.Push(lua.LFalse)
return 1
}
L.Push(lua.LTrue)
return 1
}))
L.SetField(bcryptMod, "__seed", lua.LString(fmt.Sprint(seed)))
L.Push(bcryptMod)
return 1
}
loadCryptbsha256Mod := func(L *lua.LState) int {
llog.Debug("import module crypt.sha256", slog.String("script", path))
sha265mod := L.NewTable()
L.SetField(sha265mod, "hash", L.NewFunction(func(l *lua.LState) int {
data := ConvertLuaTypesToGolang(L.Get(1))
var dataStr = fmt.Sprint(data)
hash := sha256.Sum256([]byte(dataStr))
L.Push(lua.LString(hex.EncodeToString(hash[:])))
L.Push(lua.LNil)
return 2
}))
L.SetField(sha265mod, "__seed", lua.LString(fmt.Sprint(seed)))
L.Push(sha265mod)
return 1
}
L.PreloadModule("internal.session", loadSessionMod)
L.PreloadModule("internal.log", loadLogMod)
L.PreloadModule("internal.net", loadNetMod)
L.PreloadModule("internal.database.sqlite", loadDBMod(llog, fmt.Sprint(seed)))
L.PreloadModule("internal.crypt.bcrypt", loadCryptbcryptMod)
L.PreloadModule("internal.crypt.sha256", loadCryptbsha256Mod)
L.PreloadModule("internal.crypt.jwt", loadJWTMod(llog, fmt.Sprint(seed)))
llog.Debug("preparing environment")
prep := filepath.Join(*h.x.Config.Conf.Node.ComDir, "_prepare.lua")
if _, err := os.Stat(prep); err == nil {
if err := L.DoFile(prep); err != nil {
llog.Error("script error", slog.String("script", path), slog.String("error", err.Error()))
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
}
}
llog.Debug("executing script", slog.String("script", path))
err := L.DoFile(path)
if err != nil && __exit != 0 && __exit != 1 {
llog.Error("script error", slog.String("script", path), slog.String("error", err.Error()))
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
}
pkg := L.GetGlobal("package")
pkgTbl, ok := pkg.(*lua.LTable)
if !ok {
llog.Error("script error", slog.String("script", path), slog.String("error", "package not found"))
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
}
loaded := pkgTbl.RawGetString("loaded")
loadedTbl, ok := loaded.(*lua.LTable)
if !ok {
llog.Error("script error", slog.String("script", path), slog.String("error", "package.loaded not found"))
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
}
sessionVal := loadedTbl.RawGetString("internal.session")
sessionTbl, ok := sessionVal.(*lua.LTable)
if !ok {
return rpc.NewResponse(nil, req.ID)
}
tag := sessionTbl.RawGetString("__seed")
if tag.Type() != lua.LTString || tag.String() != fmt.Sprint(seed) {
llog.Debug("stock session module is not imported: wrong seed", slog.String("script", path))
return rpc.NewResponse(nil, req.ID)
}
outVal := sessionTbl.RawGetString("response")
outTbl, ok := outVal.(*lua.LTable)
if !ok {
llog.Error("script error", slog.String("script", path), slog.String("error", "response is not a table"))
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
}
if scriptDataTable, ok := outTbl.RawGetString("__script_data").(*lua.LTable); ok {
switch __exit {
case 1:
if errTbl, ok := scriptDataTable.RawGetString("error").(*lua.LTable); ok {
llog.Debug("catch error table", slog.String("script", path))
code := rpc.ErrInternalError
message := rpc.ErrInternalErrorS
if c := errTbl.RawGetString("code"); c.Type() == lua.LTNumber {
code = int(c.(lua.LNumber))
}
if msg := errTbl.RawGetString("message"); msg.Type() == lua.LTString {
message = msg.String()
}
data := ConvertLuaTypesToGolang(errTbl.RawGetString("data"))
llog.Error("the script terminated with an error", slog.Int("code", code), slog.String("message", message), slog.Any("data", data))
return rpc.NewError(code, message, data, req.ID)
}
return rpc.NewError(rpc.ErrInternalError, rpc.ErrInternalErrorS, nil, req.ID)
case 0:
resVal := ConvertLuaTypesToGolang(scriptDataTable.RawGetString("result"))
return rpc.NewResponse(resVal, req.ID)
}
}
return rpc.NewResponse(nil, req.ID)
}

View File

@@ -0,0 +1,126 @@
package sv1
import (
"fmt"
"reflect"
"strconv"
lua "github.com/yuin/gopher-lua"
)
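// ConvertLuaTypesToGolang recursively maps a Lua value to its Go
// counterpart: strings, numbers (as float64), booleans and nil map
// directly; a table whose keys all look like positive integers becomes a
// []any, and any other table becomes a map[string]any keyed by the
// stringified keys.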
func ConvertLuaTypesToGolang(value lua.LValue) any {
switch value.Type() {
case lua.LTString:
return value.String()
case lua.LTNumber:
return float64(value.(lua.LNumber))
case lua.LTBool:
return bool(value.(lua.LBool))
case lua.LTTable:
tbl := value.(*lua.LTable)
maxIdx := 0
isArray := true
var isNumeric = false
tbl.ForEach(func(key, _ lua.LValue) {
var numKey lua.LValue
var ok bool
switch key.Type() {
case lua.LTString:
numKey, ok = key.(lua.LString)
if !ok {
isArray = false
return
}
case lua.LTNumber:
numKey, ok = key.(lua.LNumber)
if !ok {
isArray = false
return
}
isNumeric = true
}
num, err := strconv.Atoi(numKey.String())
if err != nil {
isArray = false
return
}
if num < 1 {
isArray = false
return
}
if num > maxIdx {
maxIdx = num
}
})
if isArray {
arr := make([]any, maxIdx)
if isNumeric {
for i := 1; i <= maxIdx; i++ {
arr[i-1] = ConvertLuaTypesToGolang(tbl.RawGetInt(i))
}
} else {
for i := 1; i <= maxIdx; i++ {
arr[i-1] = ConvertLuaTypesToGolang(tbl.RawGetString(strconv.Itoa(i)))
}
}
return arr
}
result := make(map[string]any)
tbl.ForEach(func(key, val lua.LValue) {
result[key.String()] = ConvertLuaTypesToGolang(val)
})
return result
case lua.LTNil:
return nil
default:
return value.String()
}
}
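// ConvertGolangTypesToLua performs the reverse mapping via reflection: Go
// strings, booleans and numeric kinds become the corresponding Lua
// scalars, slices and arrays become 1-based tables, string-keyed maps
// become tables, and anything else is stringified with fmt.Sprintf.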
func ConvertGolangTypesToLua(L *lua.LState, val any) lua.LValue {
if val == nil {
return lua.LNil
}
rv := reflect.ValueOf(val)
rt := rv.Type()
switch rt.Kind() {
case reflect.String:
return lua.LString(rv.String())
case reflect.Bool:
return lua.LBool(rv.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return lua.LNumber(rv.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return lua.LNumber(rv.Uint())
case reflect.Float32, reflect.Float64:
return lua.LNumber(rv.Float())
case reflect.Slice, reflect.Array:
tbl := L.NewTable()
for i := 0; i < rv.Len(); i++ {
tbl.RawSetInt(i+1, ConvertGolangTypesToLua(L, rv.Index(i).Interface()))
}
return tbl
case reflect.Map:
if rt.Key().Kind() == reflect.String {
tbl := L.NewTable()
for _, key := range rv.MapKeys() {
val := rv.MapIndex(key)
tbl.RawSetString(key.String(), ConvertGolangTypesToLua(L, val.Interface()))
}
return tbl
}
default:
return lua.LString(fmt.Sprintf("%v", val))
}
return lua.LString(fmt.Sprintf("%v", val))
}
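A minimal round-trip sketch of these two helpers, written as if it lived in a _test.go file in the same sv1 package (the file name, test name and table contents are illustrative, not part of this change set):
// convert_roundtrip_test.go (illustrative sketch)
package sv1
import (
"testing"
lua "github.com/yuin/gopher-lua"
)
func TestConvertRoundTrip(t *testing.T) {
L := lua.NewState()
defer L.Close()
// Build { name = "node-1", tags = { "a", "b" } } on the Lua side.
tags := L.NewTable()
tags.Append(lua.LString("a"))
tags.Append(lua.LString("b"))
tbl := L.NewTable()
tbl.RawSetString("name", lua.LString("node-1"))
tbl.RawSetString("tags", tags)
// Lua table -> Go: string keys give a map, numeric keys give a slice.
goVal := ConvertLuaTypesToGolang(tbl)
m, ok := goVal.(map[string]any)
if !ok || m["name"] != "node-1" {
t.Fatalf("unexpected conversion result: %#v", goVal)
}
if arr, ok := m["tags"].([]any); !ok || len(arr) != 2 {
t.Fatalf("tags not converted to a slice: %#v", m["tags"])
}
// Go -> Lua: the map comes back as a string-keyed Lua table.
if back := ConvertGolangTypesToLua(L, goVal); back.Type() != lua.LTTable {
t.Fatalf("expected a table, got %s", back.Type())
}
}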

View File

@@ -6,17 +6,19 @@ import (
"path/filepath"
"strings"
"github.com/akyaiy/GoSally-mvp/internal/server/rpc"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)
var RPCMethodSeparator = "."
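// resolveMethodPath maps an RPC method name onto a Lua script below the
// node's ComDir by splitting on RPCMethodSeparator, e.g. "unit.get"
// resolves to <ComDir>/unit/get.lua (the method name is illustrative).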
func (h *HandlerV1) resolveMethodPath(method string) (string, error) {
if !h.allowedCmd.MatchString(method) {
return "", errors.New(rpc.ErrInvalidMethodFormatS)
}
parts := strings.Split(method, ">")
parts := strings.Split(method, RPCMethodSeparator)
relPath := filepath.Join(parts...) + ".lua"
fullPath := filepath.Join(h.x.Config.Conf.ComDir, relPath)
fullPath := filepath.Join(*h.x.Config.Conf.Node.ComDir, relPath)
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
return "", errors.New(rpc.ErrMethodNotFoundS)

View File

@@ -5,10 +5,12 @@ package sv1
import (
"regexp"
"github.com/akyaiy/GoSally-mvp/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/internal/engine/app"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
)
var SV1Version = "v1"
// HandlerV1InitStruct is used only for initialization.
type HandlerV1InitStruct struct {
Ver string

View File

@@ -0,0 +1,12 @@
package sv2
import (
"context"
"net/http"
"github.com/akyaiy/GoSally-mvp/src/internal/server/rpc"
)
func (h *Handler) Handle(_ context.Context, sid string, r *http.Request, req *rpc.RPCRequest) *rpc.RPCResponse {
return nil
}

View File

@@ -0,0 +1,43 @@
// SV2 works with binaries, scripts, and anything else that has access to stdin/stdout.
// Modules run in a separate process and communicate via I/O.
package sv2
import (
"regexp"
"github.com/akyaiy/GoSally-mvp/src/internal/core/corestate"
"github.com/akyaiy/GoSally-mvp/src/internal/engine/app"
)
// HandlerInitStruct is used only for initialization.
type HandlerInitStruct struct {
Ver string
CS *corestate.CoreState
X *app.AppX
AllowedCmd *regexp.Regexp
}
type Handler struct {
cs *corestate.CoreState
x *app.AppX
// allowedCmd is a regular expression used to validate command names.
allowedCmd *regexp.Regexp
ver string
}
func InitServer(o *HandlerInitStruct) *Handler {
return &Handler{
cs: o.CS,
x: o.X,
allowedCmd: o.AllowedCmd,
ver: o.Ver,
}
}
// GetVersion returns the API version of the Handler, which is set during initialization.
// It is used to identify the API version during request routing.
func (h *Handler) GetVersion() string {
return h.ver
}

11
src/main.go Normal file
View File

@@ -0,0 +1,11 @@
// Package main is used only to call cmd.Execute()
package main
import (
"github.com/akyaiy/GoSally-mvp/src/cmd"
_ "modernc.org/sqlite"
)
func main() {
cmd.Execute()
}

View File

@@ -1,21 +0,0 @@
sudo: false
language: go
go_import_path: github.com/dustin/go-humanize
go:
- 1.13.x
- 1.14.x
- 1.15.x
- 1.16.x
- stable
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- diff -u <(echo -n) <(gofmt -d -s .)
- go vet .
- go install -v -race ./...
- go test -v -race ./...

View File

@@ -1,21 +0,0 @@
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<http://www.opensource.org/licenses/mit-license.php>

View File

@@ -1,124 +0,0 @@
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
Just a few functions for helping humanize times and sizes.
`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.
See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
complete documentation.
## Sizes
This lets you take numbers like `82854982` and convert them to useful
strings like, `83 MB` or `79 MiB` (whichever you prefer).
Example:
```go
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
```
## Times
This lets you take a `time.Time` and spit it out in relative terms.
For example, `12 seconds ago` or `3 days from now`.
Example:
```go
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
```
Thanks to Kyle Lemons for the time implementation from an IRC
conversation one day. It's pretty neat.
## Ordinals
From a [mailing list discussion][odisc] where a user wanted to be able
to label ordinals.
0 -> 0th
1 -> 1st
2 -> 2nd
3 -> 3rd
4 -> 4th
[...]
Example:
```go
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
```
## Commas
Want to shove commas into numbers? Be my guest.
0 -> 0
100 -> 100
1000 -> 1,000
1000000000 -> 1,000,000,000
-100000 -> -100,000
Example:
```go
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
```
## Ftoa
Nicer float64 formatter that removes trailing zeros.
```go
fmt.Printf("%f", 2.24) // 2.240000
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
fmt.Printf("%f", 2.0) // 2.000000
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
```
## SI notation
Format numbers with [SI notation][sinotation].
Example:
```go
humanize.SI(0.00000000223, "M") // 2.23 nM
```
## English-specific functions
The following functions are in the `humanize/english` subpackage.
### Plurals
Simple English pluralization
```go
english.PluralWord(1, "object", "") // object
english.PluralWord(42, "object", "") // objects
english.PluralWord(2, "bus", "") // buses
english.PluralWord(99, "locus", "loci") // loci
english.Plural(1, "object", "") // 1 object
english.Plural(42, "object", "") // 42 objects
english.Plural(2, "bus", "") // 2 buses
english.Plural(99, "locus", "loci") // 99 loci
```
### Word series
Format comma-separated words lists with conjuctions:
```go
english.WordSeries([]string{"foo"}, "and") // foo
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
```
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix

View File

@@ -1,31 +0,0 @@
package humanize
import (
"math/big"
)
// order of magnitude (to a max order)
func oomm(n, b *big.Int, maxmag int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
if mag == maxmag && maxmag >= 0 {
break
}
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
// total order of magnitude
// (same as above, but with no upper limit)
func oom(n, b *big.Int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}

View File

@@ -1,189 +0,0 @@
package humanize
import (
"fmt"
"math/big"
"strings"
"unicode"
)
var (
bigIECExp = big.NewInt(1024)
// BigByte is one byte in bit.Ints
BigByte = big.NewInt(1)
// BigKiByte is 1,024 bytes in bit.Ints
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
// BigMiByte is 1,024 k bytes in bit.Ints
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
// BigGiByte is 1,024 m bytes in bit.Ints
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
// BigTiByte is 1,024 g bytes in bit.Ints
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
// BigPiByte is 1,024 t bytes in bit.Ints
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
// BigEiByte is 1,024 p bytes in bit.Ints
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
// BigZiByte is 1,024 e bytes in bit.Ints
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
// BigYiByte is 1,024 z bytes in bit.Ints
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
// BigRiByte is 1,024 y bytes in bit.Ints
BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
// BigQiByte is 1,024 r bytes in bit.Ints
BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
)
var (
bigSIExp = big.NewInt(1000)
// BigSIByte is one SI byte in big.Ints
BigSIByte = big.NewInt(1)
// BigKByte is 1,000 SI bytes in big.Ints
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
// BigMByte is 1,000 SI k bytes in big.Ints
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
// BigGByte is 1,000 SI m bytes in big.Ints
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
// BigTByte is 1,000 SI g bytes in big.Ints
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
// BigPByte is 1,000 SI t bytes in big.Ints
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
// BigEByte is 1,000 SI p bytes in big.Ints
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
// BigZByte is 1,000 SI e bytes in big.Ints
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
// BigRByte is 1,000 SI y bytes in big.Ints
BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
// BigQByte is 1,000 SI r bytes in big.Ints
BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
"b": BigByte,
"kib": BigKiByte,
"kb": BigKByte,
"mib": BigMiByte,
"mb": BigMByte,
"gib": BigGiByte,
"gb": BigGByte,
"tib": BigTiByte,
"tb": BigTByte,
"pib": BigPiByte,
"pb": BigPByte,
"eib": BigEiByte,
"eb": BigEByte,
"zib": BigZiByte,
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
"rib": BigRiByte,
"rb": BigRByte,
"qib": BigQiByte,
"qb": BigQByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
"k": BigKByte,
"mi": BigMiByte,
"m": BigMByte,
"gi": BigGiByte,
"g": BigGByte,
"ti": BigTiByte,
"t": BigTByte,
"pi": BigPiByte,
"p": BigPByte,
"ei": BigEiByte,
"e": BigEByte,
"z": BigZByte,
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
"r": BigRByte,
"ri": BigRiByte,
"q": BigQByte,
"qi": BigQiByte,
}
var ten = big.NewInt(10)
func humanateBigBytes(s, base *big.Int, sizes []string) string {
if s.Cmp(ten) < 0 {
return fmt.Sprintf("%d B", s)
}
c := (&big.Int{}).Set(s)
val, mag := oomm(c, base, len(sizes)-1)
suffix := sizes[mag]
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// BigBytes produces a human readable representation of an SI size.
//
// See also: ParseBigBytes.
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
// BigIBytes produces a human readable representation of an IEC size.
//
// See also: ParseBigBytes.
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
// ParseBigBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See also: BigBytes, BigIBytes.
//
// ParseBigBytes("42 MB") -> 42000000, nil
// ParseBigBytes("42 mib") -> 44040192, nil
func ParseBigBytes(s string) (*big.Int, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
val := &big.Rat{}
_, err := fmt.Sscanf(num, "%f", val)
if err != nil {
return nil, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bigBytesSizeTable[extra]; ok {
mv := (&big.Rat{}).SetInt(m)
val.Mul(val, mv)
rv := &big.Int{}
rv.Div(val.Num(), val.Denom())
return rv, nil
}
return nil, fmt.Errorf("unhandled size name: %v", extra)
}

View File

@@ -1,143 +0,0 @@
package humanize
import (
"fmt"
"math"
"strconv"
"strings"
"unicode"
)
// IEC Sizes.
// kibis of bits
const (
Byte = 1 << (iota * 10)
KiByte
MiByte
GiByte
TiByte
PiByte
EiByte
)
// SI Sizes.
const (
IByte = 1
KByte = IByte * 1000
MByte = KByte * 1000
GByte = MByte * 1000
TByte = GByte * 1000
PByte = TByte * 1000
EByte = PByte * 1000
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kib": KiByte,
"kb": KByte,
"mib": MiByte,
"mb": MByte,
"gib": GiByte,
"gb": GByte,
"tib": TiByte,
"tb": TByte,
"pib": PiByte,
"pb": PByte,
"eib": EiByte,
"eb": EByte,
// Without suffix
"": Byte,
"ki": KiByte,
"k": KByte,
"mi": MiByte,
"m": MByte,
"gi": GiByte,
"g": GByte,
"ti": TiByte,
"t": TByte,
"pi": PiByte,
"p": PByte,
"ei": EiByte,
"e": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%d B", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// Bytes produces a human readable representation of an SI size.
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(s, 1000, sizes)
}
// IBytes produces a human readable representation of an IEC size.
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
return humanateBytes(s, 1024, sizes)
}
// ParseBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See Also: Bytes, IBytes.
//
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
f, err := strconv.ParseFloat(num, 64)
if err != nil {
return 0, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bytesSizeTable[extra]; ok {
f *= float64(m)
if f >= math.MaxUint64 {
return 0, fmt.Errorf("too large: %v", s)
}
return uint64(f), nil
}
return 0, fmt.Errorf("unhandled size name: %v", extra)
}

View File

@@ -1,116 +0,0 @@
package humanize
import (
"bytes"
"math"
"math/big"
"strconv"
"strings"
)
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
sign := ""
// Min int64 can't be negated to a usable value, so it has to be special cased.
if v == math.MinInt64 {
return "-9,223,372,036,854,775,808"
}
if v < 0 {
sign = "-"
v = 0 - v
}
parts := []string{"", "", "", "", "", "", ""}
j := len(parts) - 1
for v > 999 {
parts[j] = strconv.FormatInt(v%1000, 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
v = v / 1000
j--
}
parts[j] = strconv.Itoa(int(v))
return sign + strings.Join(parts[j:], ",")
}
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
buf := &bytes.Buffer{}
if v < 0 {
buf.Write([]byte{'-'})
v = 0 - v
}
comma := []byte{','}
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
// CommafWithDigits works like the Commaf but limits the resulting
// string to the given number of decimal places.
//
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
func CommafWithDigits(f float64, decimals int) string {
return stripTrailingDigits(Commaf(f), decimals)
}
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
func BigComma(b *big.Int) string {
sign := ""
if b.Sign() < 0 {
sign = "-"
b.Abs(b)
}
athousand := big.NewInt(1000)
c := (&big.Int{}).Set(b)
_, m := oom(c, athousand)
parts := make([]string, m+1)
j := len(parts) - 1
mod := &big.Int{}
for b.Cmp(athousand) >= 0 {
b.DivMod(b, athousand, mod)
parts[j] = strconv.FormatInt(mod.Int64(), 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
j--
}
parts[j] = strconv.Itoa(int(b.Int64()))
return sign + strings.Join(parts[j:], ",")
}

View File

@@ -1,41 +0,0 @@
//go:build go1.6
// +build go1.6
package humanize
import (
"bytes"
"math/big"
"strings"
)
// BigCommaf produces a string form of the given big.Float in base 10
// with commas after every three orders of magnitude.
func BigCommaf(v *big.Float) string {
buf := &bytes.Buffer{}
if v.Sign() < 0 {
buf.Write([]byte{'-'})
v.Abs(v)
}
comma := []byte{','}
parts := strings.Split(v.Text('f', -1), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}

View File

@@ -1,49 +0,0 @@
package humanize
import (
"strconv"
"strings"
)
func stripTrailingZeros(s string) string {
if !strings.ContainsRune(s, '.') {
return s
}
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
offset--
break
}
if s[offset] != '0' {
break
}
offset--
}
return s[:offset+1]
}
func stripTrailingDigits(s string, digits int) string {
if i := strings.Index(s, "."); i >= 0 {
if digits <= 0 {
return s[:i]
}
i++
if i+digits >= len(s) {
return s
}
return s[:i+digits]
}
return s
}
// Ftoa converts a float to a string with no trailing zeros.
func Ftoa(num float64) string {
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}
// FtoaWithDigits converts a float to a string but limits the resulting string
// to the given number of decimal places, and no trailing zeros.
func FtoaWithDigits(num float64, digits int) string {
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
}

View File

@@ -1,8 +0,0 @@
/*
Package humanize converts boring ugly numbers to human-friendly strings and back.
Durations can be turned into strings such as "3 days ago", numbers
representing sizes like 82854982 into useful strings like, "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize

View File

@@ -1,192 +0,0 @@
package humanize
/*
Slightly adapted from the source to fit go-humanize.
Author: https://github.com/gorhill
Source: https://gist.github.com/gorhill/5285193
*/
import (
"math"
"strconv"
)
var (
renderFloatPrecisionMultipliers = [...]float64{
1,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000,
}
renderFloatPrecisionRounders = [...]float64{
0.5,
0.05,
0.005,
0.0005,
0.00005,
0.000005,
0.0000005,
0.00000005,
0.000000005,
0.0000000005,
}
)
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
// * thousands separator
// * decimal separator
// * decimal precision
//
// Usage: s := RenderFloat(format, n)
// The format parameter tells how to render the number n.
//
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
//
// Examples of format strings, given n = 12345.6789:
// "#,###.##" => "12,345.67"
// "#,###." => "12,345"
// "#,###" => "12345,678"
// "#\u202F###,##" => "12345,68"
// "#.###,###### => 12.345,678900
// "" (aka default format) => 12,345.67
//
// The highest precision allowed is 9 digits after the decimal symbol.
// There is also a version for integer number, FormatInteger(),
// which is convenient for calls within template.
func FormatFloat(format string, n float64) string {
// Special cases:
// NaN = "NaN"
// +Inf = "+Infinity"
// -Inf = "-Infinity"
if math.IsNaN(n) {
return "NaN"
}
if n > math.MaxFloat64 {
return "Infinity"
}
if n < (0.0 - math.MaxFloat64) {
return "-Infinity"
}
// default format
precision := 2
decimalStr := "."
thousandStr := ","
positiveStr := ""
negativeStr := "-"
if len(format) > 0 {
format := []rune(format)
// If there is an explicit format directive,
// then default values are these:
precision = 9
thousandStr = ""
// collect indices of meaningful formatting directives
formatIndx := []int{}
for i, char := range format {
if char != '#' && char != '0' {
formatIndx = append(formatIndx, i)
}
}
if len(formatIndx) > 0 {
// Directive at index 0:
// Must be a '+'
// Raise an error if not the case
// index: 0123456789
// +0.000,000
// +000,000.0
// +0000.00
// +0000
if formatIndx[0] == 0 {
if format[formatIndx[0]] != '+' {
panic("RenderFloat(): invalid positive sign directive")
}
positiveStr = "+"
formatIndx = formatIndx[1:]
}
// Two directives:
// First is thousands separator
// Raise an error if not followed by 3-digit
// 0123456789
// 0.000,000
// 000,000.00
if len(formatIndx) == 2 {
if (formatIndx[1] - formatIndx[0]) != 4 {
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
}
thousandStr = string(format[formatIndx[0]])
formatIndx = formatIndx[1:]
}
// One directive:
// Directive is decimal separator
// The number of digit-specifier following the separator indicates wanted precision
// 0123456789
// 0.00
// 000,0000
if len(formatIndx) == 1 {
decimalStr = string(format[formatIndx[0]])
precision = len(format) - formatIndx[0] - 1
}
}
}
// generate sign part
var signStr string
if n >= 0.000000001 {
signStr = positiveStr
} else if n <= -0.000000001 {
signStr = negativeStr
n = -n
} else {
signStr = ""
n = 0.0
}
// split number into integer and fractional parts
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
// generate integer part string
intStr := strconv.FormatInt(int64(intf), 10)
// add thousand separator if required
if len(thousandStr) > 0 {
for i := len(intStr); i > 3; {
i -= 3
intStr = intStr[:i] + thousandStr + intStr[i:]
}
}
// no fractional part, we can leave now
if precision == 0 {
return signStr + intStr
}
// generate fractional part
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
// may need padding
if len(fracStr) < precision {
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
}
return signStr + intStr + decimalStr + fracStr
}
// FormatInteger produces a formatted number as string.
// See FormatFloat.
func FormatInteger(format string, n int) string {
return FormatFloat(format, float64(n))
}

View File

@@ -1,25 +0,0 @@
package humanize
import "strconv"
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
suffix := "th"
switch x % 10 {
case 1:
if x%100 != 11 {
suffix = "st"
}
case 2:
if x%100 != 12 {
suffix = "nd"
}
case 3:
if x%100 != 13 {
suffix = "rd"
}
}
return strconv.Itoa(x) + suffix
}

View File

@@ -1,127 +0,0 @@
package humanize
import (
"errors"
"math"
"regexp"
"strconv"
)
var siPrefixTable = map[float64]string{
-30: "q", // quecto
-27: "r", // ronto
-24: "y", // yocto
-21: "z", // zepto
-18: "a", // atto
-15: "f", // femto
-12: "p", // pico
-9: "n", // nano
-6: "µ", // micro
-3: "m", // milli
0: "",
3: "k", // kilo
6: "M", // mega
9: "G", // giga
12: "T", // tera
15: "P", // peta
18: "E", // exa
21: "Z", // zetta
24: "Y", // yotta
27: "R", // ronna
30: "Q", // quetta
}
var revSIPrefixTable = revfmap(siPrefixTable)
// revfmap reverses the map and precomputes the power multiplier
func revfmap(in map[float64]string) map[string]float64 {
rv := map[string]float64{}
for k, v := range in {
rv[v] = math.Pow(10, k)
}
return rv
}
var riParseRegex *regexp.Regexp
func init() {
ri := `^([\-0-9.]+)\s?([`
for _, v := range siPrefixTable {
ri += v
}
ri += `]?)(.*)`
riParseRegex = regexp.MustCompile(ri)
}
// ComputeSI finds the most appropriate SI prefix for the given number
// and returns the prefix along with the value adjusted to be within
// that prefix.
//
// See also: SI, ParseSI.
//
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
func ComputeSI(input float64) (float64, string) {
if input == 0 {
return 0, ""
}
mag := math.Abs(input)
exponent := math.Floor(logn(mag, 10))
exponent = math.Floor(exponent/3) * 3
value := mag / math.Pow(10, exponent)
// Handle special case where value is exactly 1000.0
// Should return 1 M instead of 1000 k
if value == 1000.0 {
exponent += 3
value = mag / math.Pow(10, exponent)
}
value = math.Copysign(value, input)
prefix := siPrefixTable[exponent]
return value, prefix
}
// SI returns a string with default formatting.
//
// SI uses Ftoa to format float value, removing trailing zeros.
//
// See also: ComputeSI, ParseSI.
//
// e.g. SI(1000000, "B") -> 1 MB
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
func SI(input float64, unit string) string {
value, prefix := ComputeSI(input)
return Ftoa(value) + " " + prefix + unit
}
// SIWithDigits works like SI but limits the resulting string to the
// given number of decimal places.
//
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
func SIWithDigits(input float64, decimals int, unit string) string {
value, prefix := ComputeSI(input)
return FtoaWithDigits(value, decimals) + " " + prefix + unit
}
var errInvalid = errors.New("invalid input")
// ParseSI parses an SI string back into the number and unit.
//
// See also: SI, ComputeSI.
//
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
func ParseSI(input string) (float64, string, error) {
found := riParseRegex.FindStringSubmatch(input)
if len(found) != 4 {
return 0, "", errInvalid
}
mag := revSIPrefixTable[found[2]]
unit := found[3]
base, err := strconv.ParseFloat(found[1], 64)
return base * mag, unit, err
}

View File

@@ -1,117 +0,0 @@
package humanize
import (
"fmt"
"math"
"sort"
"time"
)
// Seconds-based time units
const (
Day = 24 * time.Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
LongTime = 37 * Year
)
// Time formats a time into a relative string.
//
// Time(someT) -> "3 weeks ago"
func Time(then time.Time) string {
return RelTime(then, time.Now(), "ago", "from now")
}
// A RelTimeMagnitude struct contains a relative time point at which
// the relative format of time will switch to a new format string. A
// slice of these in ascending order by their "D" field is passed to
// CustomRelTime to format durations.
//
// The Format field is a string that may contain a "%s" which will be
// replaced with the appropriate signed label (e.g. "ago" or "from
// now") and a "%d" that will be replaced by the quantity.
//
// The DivBy field is the amount of time the time difference must be
// divided by in order to display correctly.
//
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
// DivBy should be time.Minute so whatever the duration is will be
// expressed in minutes.
type RelTimeMagnitude struct {
D time.Duration
Format string
DivBy time.Duration
}
var defaultMagnitudes = []RelTimeMagnitude{
{time.Second, "now", time.Second},
{2 * time.Second, "1 second %s", 1},
{time.Minute, "%d seconds %s", time.Second},
{2 * time.Minute, "1 minute %s", 1},
{time.Hour, "%d minutes %s", time.Minute},
{2 * time.Hour, "1 hour %s", 1},
{Day, "%d hours %s", time.Hour},
{2 * Day, "1 day %s", 1},
{Week, "%d days %s", Day},
{2 * Week, "1 week %s", 1},
{Month, "%d weeks %s", Week},
{2 * Month, "1 month %s", 1},
{Year, "%d months %s", Month},
{18 * Month, "1 year %s", 1},
{2 * Year, "2 years %s", 1},
{LongTime, "%d years %s", Year},
{math.MaxInt64, "a long while %s", 1},
}
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the labels are used applied so that
// the label corresponding to the smaller time is applied.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
// CustomRelTime formats a time into a relative string.
//
// It takes two times two labels and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), the
// labels are used applied so that the label corresponding to the
// smaller time is applied.
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
lbl := albl
diff := b.Sub(a)
if a.After(b) {
lbl = blbl
diff = a.Sub(b)
}
n := sort.Search(len(magnitudes), func(i int) bool {
return magnitudes[i].D > diff
})
if n >= len(magnitudes) {
n = len(magnitudes) - 1
}
mag := magnitudes[n]
args := []interface{}{}
escaped := false
for _, ch := range mag.Format {
if escaped {
switch ch {
case 's':
args = append(args, lbl)
case 'd':
args = append(args, diff/mag.DivBy)
}
escaped = false
} else {
escaped = ch == '%'
}
}
return fmt.Sprintf(mag.Format, args...)
}

View File

@@ -1,14 +0,0 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
image_family: freebsd-14-2
install_script:
- pkg update -f
- pkg install -y go
test_script:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...

View File

@@ -1,10 +0,0 @@
# go test -c output
*.test
*.test.exe
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe
/test/kqueue
/test/a.out

View File

@@ -1,2 +0,0 @@
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>

View File

@@ -1,602 +0,0 @@
# Changelog
1.9.0 2024-04-04
----------------
### Changes and fixes
- all: make BufferedWatcher buffered again ([#657])
- inotify: fix race when adding/removing watches while a watched path is being
deleted ([#678], [#686])
- inotify: don't send empty event if a watched path is unmounted ([#655])
- inotify: don't register duplicate watches when watching both a symlink and its
target; previously that would get "half-added" and removing the second would
panic ([#679])
- kqueue: fix watching relative symlinks ([#681])
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
kqueue ([#682])
- illumos: don't send error if changed file is deleted while processing the
event ([#678])
[#657]: https://github.com/fsnotify/fsnotify/pull/657
[#678]: https://github.com/fsnotify/fsnotify/pull/678
[#686]: https://github.com/fsnotify/fsnotify/pull/686
[#655]: https://github.com/fsnotify/fsnotify/pull/655
[#681]: https://github.com/fsnotify/fsnotify/pull/681
[#679]: https://github.com/fsnotify/fsnotify/pull/679
[#682]: https://github.com/fsnotify/fsnotify/pull/682
1.8.0 2024-10-31
----------------
### Additions
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
### Changes and fixes
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
- kqueue: ignore events with Ident=0 ([#590])
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
- inotify: fix panic when calling Remove() in a goroutine ([#650])
- fen: allow watching subdirectories of watched directories ([#621])
[#590]: https://github.com/fsnotify/fsnotify/pull/590
[#610]: https://github.com/fsnotify/fsnotify/pull/610
[#617]: https://github.com/fsnotify/fsnotify/pull/617
[#619]: https://github.com/fsnotify/fsnotify/pull/619
[#620]: https://github.com/fsnotify/fsnotify/pull/620
[#621]: https://github.com/fsnotify/fsnotify/pull/621
[#625]: https://github.com/fsnotify/fsnotify/pull/625
[#650]: https://github.com/fsnotify/fsnotify/pull/650
1.7.0 - 2023-10-22
------------------
This version of fsnotify needs Go 1.17.
### Additions
- illumos: add FEN backend to support illumos and Solaris. ([#371])
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
in cases where you can't control the kernel buffer and receive a large number
of events in bursts. ([#550], [#572])
- all: add `AddWith()`, which is identical to `Add()` but allows passing
options. ([#521])
- windows: allow setting the ReadDirectoryChangesW() buffer size with
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
works on all platforms and is enough for most purposes, but in some cases a
highest buffer is needed. ([#521])
### Changes and fixes
- inotify: remove watcher if a watched path is renamed ([#518])
After a rename the reported name wasn't updated, or even an empty string.
Inotify doesn't provide any good facilities to update it, so just remove the
watcher. This is already how it worked on kqueue and FEN.
On Windows this does work, and remains working.
- windows: don't listen for file attribute changes ([#520])
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
with no way to see if they're a file write or attribute change, so would show
up as a fsnotify.Write event. This is never useful, and could result in many
spurious Write events.
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
Before it would merely return "short read", making it hard to detect this
error.
- kqueue: make sure events for all files are delivered properly when removing a
watched directory ([#526])
Previously they would get sent with `""` (empty string) or `"."` as the path
name.
- kqueue: don't emit spurious Create events for symbolic links ([#524])
The link would get resolved but kqueue would "forget" it already saw the link
itself, resulting on a Create for every Write event for the directory.
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
`backend_other.go`, making it easier to use on unsupported platforms such as
WASM, AIX, etc. ([#528])
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
Google AppEngine forbids usage of the unsafe package so the inotify backend
won't compile there.
[#371]: https://github.com/fsnotify/fsnotify/pull/371
[#516]: https://github.com/fsnotify/fsnotify/pull/516
[#518]: https://github.com/fsnotify/fsnotify/pull/518
[#520]: https://github.com/fsnotify/fsnotify/pull/520
[#521]: https://github.com/fsnotify/fsnotify/pull/521
[#524]: https://github.com/fsnotify/fsnotify/pull/524
[#525]: https://github.com/fsnotify/fsnotify/pull/525
[#526]: https://github.com/fsnotify/fsnotify/pull/526
[#528]: https://github.com/fsnotify/fsnotify/pull/528
[#537]: https://github.com/fsnotify/fsnotify/pull/537
[#550]: https://github.com/fsnotify/fsnotify/pull/550
[#572]: https://github.com/fsnotify/fsnotify/pull/572
1.6.0 - 2022-10-13
------------------
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32.
### Additions
- all: add `Event.Has()` and `Op.Has()` ([#477])
  This makes checking events a lot easier; for example:

      if event.Op&Write == Write && !(event.Op&Remove == Remove) {
      }

  Becomes:

      if event.Has(Write) && !event.Has(Remove) {
      }

- all: add cmd/fsnotify ([#463])
A command-line utility for testing and some examples.
### Changes and fixes
- inotify: don't ignore events for files that don't exist ([#260], [#470])
Previously the inotify watcher would call `os.Lstat()` to check if a file
still exists before emitting events.
This was inconsistent with other platforms and resulted in inconsistent event
reporting (e.g. when a file is quickly removed and re-created), and generally
a source of confusion. It was added in 2013 to fix a memory leak that no
longer exists.
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
not watched ([#460])
- inotify: replace epoll() with non-blocking inotify ([#434])
Non-blocking inotify was not generally available at the time this library was
written in 2014, but now it is. As a result, the minimum Linux version is
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
- kqueue: don't check for events every 100ms ([#480])
The watcher would wake up every 100ms, even when there was nothing to do. Now
it waits until there is something to do.
- macos: retry opening files on EINTR ([#475])
- kqueue: skip unreadable files ([#479])
kqueue requires a file descriptor for every file in a directory; this would
fail if a file was unreadable by the current user. Now these files are simply
skipped.
- windows: fix renaming a watched directory if the parent is also watched ([#370])
- windows: increase buffer size from 4K to 64K ([#485])
- windows: close file handle on Remove() ([#288])
- kqueue: put pathname in the error if watching a file fails ([#471])
- inotify, windows: calling Close() more than once could race ([#465])
- kqueue: improve Close() performance ([#233])
- all: various documentation additions and clarifications.
[#233]: https://github.com/fsnotify/fsnotify/pull/233
[#260]: https://github.com/fsnotify/fsnotify/pull/260
[#288]: https://github.com/fsnotify/fsnotify/pull/288
[#370]: https://github.com/fsnotify/fsnotify/pull/370
[#434]: https://github.com/fsnotify/fsnotify/pull/434
[#460]: https://github.com/fsnotify/fsnotify/pull/460
[#463]: https://github.com/fsnotify/fsnotify/pull/463
[#465]: https://github.com/fsnotify/fsnotify/pull/465
[#470]: https://github.com/fsnotify/fsnotify/pull/470
[#471]: https://github.com/fsnotify/fsnotify/pull/471
[#475]: https://github.com/fsnotify/fsnotify/pull/475
[#477]: https://github.com/fsnotify/fsnotify/pull/477
[#479]: https://github.com/fsnotify/fsnotify/pull/479
[#480]: https://github.com/fsnotify/fsnotify/pull/480
[#485]: https://github.com/fsnotify/fsnotify/pull/485
## [1.5.4] - 2022-04-25
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
## [1.5.3] - 2022-04-22
* This version is retracted; an incorrect branch was accidentally published [#445](https://github.com/fsnotify/fsnotify/issues/445)
## [1.5.2] - 2022-04-21
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
## [1.5.1] - 2021-08-24
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
## [1.5.0] - 2021-08-20
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
[#378](https://github.com/fsnotify/fsnotify/pull/378)
[#381](https://github.com/fsnotify/fsnotify/pull/381)
[#385](https://github.com/fsnotify/fsnotify/pull/385)
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
## [1.4.9] - 2020-03-11
* Move example usage to the readme #329. This may resolve #328.
## [1.4.8] - 2020-03-10
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
* CI: Less verbosity (@nathany #267)
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
* CI: Add windows to travis matrix (@cpuguy83 #284)
* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
* Linux: open files with close-on-exec (@linxiulei #273)
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
* Project: Add go.mod (@nathany #309)
* Project: Revise editor config (@nathany #309)
* Project: Update copyright for 2019 (@nathany #309)
* CI: Drop go1.8 from CI matrix (@nathany #309)
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
## [1.4.7] - 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
* Linux: Fix deadlock in Remove (thanks @aarondl)
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
* Docs: Moved FAQ into the README (thanks @vahe)
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
## [1.4.2] - 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
## [1.4.1] - 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
## [1.4.0] - 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
## [1.3.1] - 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
## [1.3.0] - 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
## [1.2.10] - 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
## [1.2.9] - 2016-01-13
* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
## [1.2.8] - 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
## [1.2.5] - 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
## [1.2.1] - 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
## [1.2.0] - 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
## [1.1.1] - 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
## [1.1.0] - 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
* only need to store flags on directories
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
* done can be an unbuffered channel
* remove calls to os.NewSyscallError
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## [1.0.4] - 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
## [1.0.3] - 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
## [1.0.2] - 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## [1.0.0] - 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
* Minor updates based on feedback from golint.
## dev / 2014-07-09
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
## dev / 2014-07-04
* kqueue: fix incorrect mutex used in Close()
* Update example to demonstrate usage of Op.
## dev / 2014-06-28
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
* Fix for String() method on Event (thanks Alex Brainman)
* Don't build on Plan 9 or Solaris (thanks @4ad)
## dev / 2014-06-21
* Events channel of type Event rather than *Event.
* [internal] use syscall constants directly for inotify and kqueue.
* [internal] kqueue: rename events to kevents and fileEvent to event.
## dev / 2014-06-19
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
* [internal] remove cookie from Event struct (unused).
* [internal] Event struct has the same definition across every OS.
* [internal] remove internal watch and removeWatch methods.
## dev / 2014-06-12
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
* [API] Pluralized channel names: Events and Errors.
* [API] Renamed FileEvent struct to Event.
* [API] Op constants replace methods like IsCreate().
## dev / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## dev / 2014-05-23
* [API] Remove current implementation of WatchFlags.
* current implementation doesn't take advantage of OS for efficiency
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
## [0.9.3] - 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## [0.9.2] - 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## [0.9.1] - 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## [0.9.0] - 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## [0.8.12] - 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## [0.8.11] - 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## [0.8.10] - 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## [0.8.9] - 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## [0.8.8] - 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## [0.8.7] - 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
* [Fix] race in symlink test [#45][] (reported by @srid)
* [Fix] tests on Windows
* lower case error messages
## [0.8.6] - 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## [0.8.5] - 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## [0.8.4] - 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## [0.8.3] - 2013-03-13
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## [0.8.2] - 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## [0.8.1] - 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## [0.8.0] - 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## [0.7.4] - 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## [0.7.3] - 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## [0.7.2] - 2012-09-01
* kqueue: events for created directories
## [0.7.1] - 2012-07-14
* [Fix] for renaming files
## [0.7.0] - 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## [0.6.0] - 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## [0.5.1] - 2012-05-22
* [Fix] inotify: remove all watches before Close()
## [0.5.0] - 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## [0.4.0] - 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## [0.3.0] - 2012-02-19
* kqueue: add files when watch directory
## [0.2.0] - 2011-12-30
* update to latest Go weekly code
## [0.1.0] - 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit
[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21


@@ -1,145 +0,0 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
can just send PRs, but they may end up being rejected for one reason or the
other.
- fsnotify is a cross-platform library, and changes must work reasonably well on
all supported platforms.
- Changes will need to be compatible; old code should still compile, and the
runtime behaviour can't change in ways that are likely to lead to problems for
users.
Testing
-------
Just `go test ./...` runs all the tests; the CI runs this on all supported
platforms. Testing different platforms locally can be done with something like
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
Use the `-short` flag to make the "stress test" run faster.
Writing new tests
-----------------
Scripts in the testdata directory allow creating test cases in a "shell-like"
syntax. The basic format is:

    script

    Output:
        desired output

For example:

    # Create a new empty file with some data.
    watch /
    echo data >/file

    Output:
        create /file
        write /file
Just create a new file to add a new test; select which tests to run with
`-run TestScript/[path]`.
script
------
The script is a "shell-like" script:

    cmd arg arg

Comments are supported with `#`:

    # Comment
    cmd arg arg # Comment

All operations are done in a temp directory; a path like "/foo" is rewritten to
"/tmp/TestFoo/foo".

Arguments can be quoted with `"` or `'`; there are no escapes and they're
functionally identical right now, but this may change in the future, so best to
assume shell-like rules.

    touch "/file with spaces"

End-of-line escapes with `\` are not supported.
### Supported commands

    watch path [ops]    # Watch the path, reporting events for it. Nothing is
                        # watched by default. Optionally a list of ops can be
                        # given, as with AddWith(path, WithOps(...)).
    unwatch path        # Stop watching the path.
    watchlist n         # Assert watchlist length.

    stop                # Stop running the script; for debugging.
    debug [yes/no]      # Enable/disable FSNOTIFY_DEBUG (tests are run in
                          parallel by default, so -parallel=1 is probably a good
                          idea).
    print [any strings] # Print text to stdout; for debugging.

    touch path
    mkdir [-p] dir
    ln -s target link   # Only ln -s supported.
    mkfifo path
    mknod dev path
    mv src dst
    rm [-r] path
    chmod mode path     # Octal only
    sleep time-in-ms

    cat path            # Read path (does nothing with the data; just reads it).
    echo str >>path     # Append "str" to "path".
    echo str >path      # Truncate "path" and write "str".

    require reason      # Skip the test if "reason" is true; "skip" and
    skip reason         # "require" behave identically; both are supported for
                        # readability. Possible reasons are:
                        #
                        #   always   Always skip this test.
                        #   symlink  Symlinks are supported (requires admin
                        #            permissions on Windows).
                        #   mkfifo   Platform doesn't support FIFO named sockets.
                        #   mknod    Platform doesn't support device nodes.
output
------
After `Output:` the desired output is given; this is indented by convention, but
that's not required.

The format of that is:

    # Comment
    event  path  # Comment

    system:
        event  path
    system2:
        event  path

Every event is one line, and any whitespace between the event and path is
ignored. The path can optionally be surrounded in `"`. Anything after a `#` is
ignored.
Platform-specific tests can be added after GOOS; for example:

    watch /
    touch /file

    Output:
        # Tested if nothing else matches
        create /file

        # Windows-specific test.
        windows:
            write /file

You can specify multiple platforms with a comma (e.g. "windows, linux:").
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
[integration_test.go]: /integration_test.go


@@ -1,25 +0,0 @@
Copyright © 2012 The Go Authors. All rights reserved.
Copyright © fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of Google Inc. nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,182 +0,0 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
Windows, Linux, macOS, BSD, and illumos.
Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify
---
Platform support:
| Backend | OS | Status |
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
| inotify | Linux | Supported |
| kqueue | BSD, macOS | Supported |
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
Linux and illumos should include Android and Solaris, but these are currently
untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
Usage
-----
A basic example:
```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Create new watcher.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Start listening for events.
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
				if event.Has(fsnotify.Write) {
					log.Println("modified file:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	// Add a path.
	err = watcher.Add("/tmp")
	if err != nil {
		log.Fatal(err)
	}

	// Block main goroutine forever.
	<-make(chan struct{})
}
```
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
run with:

    % go run ./cmd/fsnotify

Further detailed documentation can be found in godoc:
https://pkg.go.dev/github.com/fsnotify/fsnotify
FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.
### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).
[#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine?
Yes. You can read both channels in the same goroutine using `select` (you don't
need a separate goroutine for both channels; see the example).
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from the underlying OS to work. The current NFS and
SMB protocols do not provide network-level support for file notifications, and
neither do the /proc and /sys virtual filesystems.
This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9
### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.
Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: they write to a
temporary file which is then moved to the destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
file no longer exists.
The upshot of this is that a power failure or crash won't leave a half-written
file.
Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
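
A minimal sketch of that approach, assuming the file of interest is
`/tmp/config.yaml` (the path and handling are illustrative; `cmd/fsnotify/file.go`
has the complete example):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the parent directory, not the file itself.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Reading w.Errors is omitted for brevity; see the usage example above.
	for e := range w.Events {
		// Filter on Event.Name so only the file we care about is handled.
		if filepath.Base(e.Name) != "config.yaml" {
			continue
		}
		log.Println("event for config.yaml:", e)
	}
}
```
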
Platform-specific notes
-----------------------
### Linux
When a file is removed a REMOVE event won't be emitted until all file
descriptors are closed; it will emit a CHMOD instead:

    fp, _ := os.Open("file")
    os.Remove("file")   // CHMOD
    fp.Close()          // REMOVE

This is the event that inotify sends, so not much can be changed about this.
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
the number of watches per user, and `fs.inotify.max_user_instances` specifies
the maximum number of inotify instances per user. Every Watcher you create is an
"instance", and every path you add is a "watch".
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
`/proc/sys/fs/inotify/max_user_instances`.

To increase them you can use `sysctl` or write the value to the proc file:

    # The default values on Linux 5.18
    sysctl fs.inotify.max_user_watches=124983
    sysctl fs.inotify.max_user_instances=128

To make the changes persist on reboot edit `/etc/sysctl.conf` or
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
distro's documentation):

    fs.inotify.max_user_watches=124983
    fs.inotify.max_user_instances=128

Reaching the limit will result in a "no space left on device" or "too many open
files" error.
### kqueue (macOS, all BSD systems)
kqueue requires opening a file descriptor for every file that's being watched;
so if you're watching a directory with five files then that's six file
descriptors. You will run into your system's "max open files" limit faster on
these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
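
A minimal sketch for checking the per-process open-file limit from Go on these
platforms, using `golang.org/x/sys/unix` (which fsnotify already depends on);
this is only an illustration, not part of fsnotify's API:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Every watched file costs one descriptor on kqueue, so a low
	// RLIMIT_NOFILE is the usual cause of "too many open files" errors.
	var rl unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("open files limit: soft=%d hard=%d\n", rl.Cur, rl.Max)
}
```
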


@@ -1,467 +0,0 @@
//go:build solaris
// FEN backend for illumos (supported) and Solaris (untested, but should work).
//
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
package fsnotify
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
"time"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
type fen struct {
*shared
Events chan Event
Errors chan error
mu sync.Mutex
port *unix.EventPort
dirs map[string]Op // Explicitly watched directories
watches map[string]Op // Explicitly watched non-directories
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
w := &fen{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
dirs: make(map[string]Op),
watches: make(map[string]Op),
}
var err error
w.port, err = unix.NewEventPort()
if err != nil {
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
}
go w.readEvents()
return w, nil
}
func (w *fen) Close() error {
if w.shared.close() {
return nil
}
return w.port.Close()
}
func (w *fen) Add(name string) error { return w.AddWith(name) }
func (w *fen) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
stat, err := os.Stat(name)
if err != nil {
return err
}
// Associate all files in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, true, w.associateFile)
if err != nil {
return err
}
w.mu.Lock()
w.dirs[name] = with.op
w.mu.Unlock()
return nil
}
err = w.associateFile(name, stat, true)
if err != nil {
return err
}
w.mu.Lock()
w.watches[name] = with.op
w.mu.Unlock()
return nil
}
func (w *fen) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
// doesn't cause harm.
w.mu.Lock()
delete(w.watches, name)
delete(w.dirs, name)
w.mu.Unlock()
stat, err := os.Stat(name)
if err != nil {
return err
}
// Remove associations for every file in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, false, w.dissociateFile)
if err != nil {
return err
}
return nil
}
err = w.port.DissociatePath(name)
if err != nil {
return err
}
return nil
}
// readEvents contains the main loop that runs in a goroutine watching for events.
func (w *fen) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
close(w.Errors)
close(w.Events)
}()
pevents := make([]unix.PortEvent, 8)
for {
count, err := w.port.Get(pevents, 1, nil)
if err != nil && err != unix.ETIME {
// Interrupted system call (count should be 0) ignore and continue
if errors.Is(err, unix.EINTR) && count == 0 {
continue
}
// Get failed because we called w.Close()
if errors.Is(err, unix.EBADF) && w.isClosed() {
return
}
// There was an error not caused by calling w.Close()
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
return
}
}
p := pevents[:count]
for _, pevent := range p {
if pevent.Source != unix.PORT_SOURCE_FILE {
// Event from unexpected source received; should never happen.
if !w.sendError(errors.New("Event from unexpected source received")) {
return
}
continue
}
if debug {
internal.Debug(pevent.Path, pevent.Events)
}
err = w.handleEvent(&pevent)
if !w.sendError(err) {
return
}
}
}
}
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
}
// Handle all children of the directory.
for _, entry := range files {
finfo, err := entry.Info()
if err != nil {
return err
}
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
if err != nil {
return err
}
}
// And finally handle the directory itself.
return handler(path, stat, follow)
}
// handleEvent might need to emit more than one fsnotify event if the events
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
func (w *fen) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
fmode = event.Cookie.(os.FileMode)
reRegister = true
)
w.mu.Lock()
_, watchedDir := w.dirs[path]
_, watchedPath := w.watches[path]
w.mu.Unlock()
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
if !w.sendEvent(Event{Name: path, Op: Rename}) {
return nil
}
// Don't keep watching the new file name
reRegister = false
}
if events&unix.FILE_RENAME_TO != 0 {
// We don't report a Rename event for this case, because Rename events
// are interpreted as referring to the _old_ name of the file, and in
// this case the event would refer to the new name of the file. This
// type of rename event is not supported by fsnotify.
// inotify reports a Remove event in this case, so we simulate this
// here.
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't keep watching the file that was removed
reRegister = false
}
// The file is gone, nothing left to do.
if !reRegister {
if watchedDir {
w.mu.Lock()
delete(w.dirs, path)
w.mu.Unlock()
}
if watchedPath {
w.mu.Lock()
delete(w.watches, path)
w.mu.Unlock()
}
return nil
}
// If we didn't get a deletion the file still exists and we're going to have
// to watch it again. Let's Stat it now so that we can compare permissions
// and have what we need to continue watching the file
stat, err := os.Lstat(path)
if err != nil {
// This is unexpected, but we should still emit an event. This happens
// most often on "rm -r" of a subdirectory inside a watched directory. We
// get a modify event of something happening inside, but by the time we
// get here, the subdirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Suppress extra write events on removed directories; they are not
// informative and can be confusing.
return nil
}
// resolve symlinks that were explicitly watched as we would have at Add()
// time. this helps suppress spurious Chmod events on watched symlinks
if isWatched {
stat, err = os.Stat(path)
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't return the error
}
}
if events&unix.FILE_MODIFIED != 0 {
if fmode.IsDir() && watchedDir {
if err := w.updateDirectory(path); err != nil {
return err
}
} else {
if !w.sendEvent(Event{Name: path, Op: Write}) {
return nil
}
}
}
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
return nil
}
}
}
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
err := w.associateFile(path, stat, isWatched)
if errors.Is(err, fs.ErrNotExist) {
// Path may have been removed since the stat.
err = nil
}
return err
}
return nil
}
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen, as
// everything else should still be watched.
func (w *fen) updateDirectory(path string) error {
files, err := os.ReadDir(path)
if err != nil {
// Directory no longer exists: probably just deleted since we got the
// event.
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
for _, entry := range files {
path := filepath.Join(path, entry.Name())
if w.port.PathIsWatched(path) {
continue
}
finfo, err := entry.Info()
if err != nil {
return err
}
err = w.associateFile(path, finfo, false)
if errors.Is(err, fs.ErrNotExist) {
// File may have disappeared between getting the dir listing and
// adding the port: that's okay to ignore.
continue
}
if !w.sendError(err) {
return nil
}
if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
return nil
}
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
// This is primarily protecting the call to AssociatePath but it is
// important and intentional that the call to PathIsWatched is also
// protected by this mutex. Without this mutex, AssociatePath has been seen
// to error out that the path is already associated.
w.mu.Lock()
defer w.mu.Unlock()
if w.port.PathIsWatched(path) {
// Remove the old association in favor of this one. If we get ENOENT,
// then while the x/sys/unix wrapper still thought that this path was
// associated, the underlying event port did not. This call will have
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
if err != nil && !errors.Is(err, unix.ENOENT) {
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
}
var events int
if !follow {
// Watch symlinks themselves rather than their targets unless this entry
// is explicitly watched.
events |= unix.FILE_NOFOLLOW
}
if true { // TODO: implement withOps()
events |= unix.FILE_MODIFIED
}
if true {
events |= unix.FILE_ATTRIB
}
err := w.port.AssociatePath(path, stat, events, stat.Mode())
if err != nil {
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
err := w.port.DissociatePath(path)
if err != nil {
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches)+len(w.dirs))
for pathname := range w.dirs {
entries = append(entries, pathname)
}
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
func (w *fen) xSupports(op Op) bool {
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false
}
return true
}


@@ -1,583 +0,0 @@
//go:build linux && !appengine
package fsnotify
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"time"
"unsafe"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
type inotify struct {
*shared
Events chan Event
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
inotifyFile *os.File
watches *watches
doneResp chan struct{} // Channel to respond to Close
// Store rename cookies in an array, with the index wrapping to 0. Almost
// all of the time what we get is a MOVED_FROM to set the cookie and the
// next event inotify sends will be MOVED_TO to read it. However, this is
// not guaranteed as described in inotify(7) and we may get other events
// between the two MOVED_* events (including other MOVED_* ones).
//
// A second issue is that moving a file outside the watched directory will
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
// read and delete it. So just storing it in a map would slowly leak memory.
//
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
// Ten items should be more than enough for our purpose, and a loop over
// such a short array is faster than a map access anyway (not that it hugely
// matters since we're talking about hundreds of ns at the most, but still).
cookies [10]koekje
cookieIndex uint8
cookiesMu sync.Mutex
}
type (
watches struct {
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
path string // Watch path.
recurse bool // Recursion with ./...?
}
koekje struct {
cookie uint32
path string
}
)
func newWatches() *watches {
return &watches{
wd: make(map[uint32]*watch),
path: make(map[string]uint32),
}
}
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
func (w *watches) len() int { return len(w.wd) }
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
func (w *watches) removePath(path string) ([]uint32, error) {
path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
}
watch := w.wd[wd]
if recurse && !watch.recurse {
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
}
delete(w.path, path)
delete(w.wd, wd)
if !watch.recurse {
return []uint32{wd}, nil
}
wds := make([]uint32, 0, 8)
wds = append(wds, wd)
for p, rwd := range w.path {
if strings.HasPrefix(p, path) {
delete(w.path, p)
delete(w.wd, rwd)
wds = append(wds, rwd)
}
}
return wds, nil
}
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
var existing *watch
wd, ok := w.path[path]
if ok {
existing = w.wd[wd]
}
upd, err := f(existing)
if err != nil {
return err
}
if upd != nil {
w.wd[upd.wd] = upd
w.path[upd.path] = upd.wd
if upd.wd != wd {
delete(w.wd, wd)
}
}
return nil
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
}
w := &inotify{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
func (w *inotify) Close() error {
if w.shared.close() {
return nil
}
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
err := w.inotifyFile.Close()
if err != nil {
return err
}
<-w.doneResp // Wait for readEvents() to finish.
return nil
}
func (w *inotify) Add(name string) error { return w.AddWith(name) }
func (w *inotify) AddWith(path string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), path)
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
add := func(path string, with withOpts, recurse bool) error {
var flags uint32
if with.noFollow {
flags |= unix.IN_DONT_FOLLOW
}
if with.op.Has(Create) {
flags |= unix.IN_CREATE
}
if with.op.Has(Write) {
flags |= unix.IN_MODIFY
}
if with.op.Has(Remove) {
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
}
if with.op.Has(Rename) {
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
}
if with.op.Has(Chmod) {
flags |= unix.IN_ATTRIB
}
if with.op.Has(xUnportableOpen) {
flags |= unix.IN_OPEN
}
if with.op.Has(xUnportableRead) {
flags |= unix.IN_ACCESS
}
if with.op.Has(xUnportableCloseWrite) {
flags |= unix.IN_CLOSE_WRITE
}
if with.op.Has(xUnportableCloseRead) {
flags |= unix.IN_CLOSE_NOWRITE
}
return w.register(path, flags, recurse)
}
w.mu.Lock()
defer w.mu.Unlock()
path, recurse := recursivePath(path)
if recurse {
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() {
if root == path {
return fmt.Errorf("fsnotify: not a directory: %q", path)
}
return nil
}
// Send a Create event when adding new directory from a recursive
// watch; this is for "mkdir -p one/two/three". Usually all those
// directories will be created before we can set up watchers on the
// subdirectories, so only "one" would be sent as a Create event and
// not "one/two" and "one/two/three" (inotifywait -r has the same
// problem).
if with.sendCreate && root != path {
w.sendEvent(Event{Name: root, Op: Create})
}
return add(root, with, true)
})
}
return add(path, with, false)
}
func (w *inotify) register(path string, flags uint32, recurse bool) error {
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
if wd == -1 {
return nil, err
}
if e, ok := w.watches.wd[uint32(wd)]; ok {
return e, nil
}
if existing == nil {
return &watch{
wd: uint32(wd),
path: path,
flags: flags,
recurse: recurse,
}, nil
}
existing.wd = uint32(wd)
existing.flags = flags
return existing, nil
})
}
func (w *inotify) Remove(name string) error {
if w.isClosed() {
return nil
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(filepath.Clean(name))
}
func (w *inotify) remove(name string) error {
wds, err := w.watches.removePath(name)
if err != nil {
return err
}
for _, wd := range wds {
_, err := unix.InotifyRmWatch(w.fd, wd)
if err != nil {
// TODO: Perhaps it's not helpful to return an error here in every
// case; the only two possible errors are:
//
// EBADF, which happens when w.fd is not a valid file descriptor of
// any kind.
//
// EINVAL, which is when fd is not an inotify descriptor or wd is
// not a valid watch descriptor. Watch descriptors are invalidated
// when they are removed explicitly or implicitly; explicitly by
// inotify_rm_watch, implicitly when the file they are watching is
// deleted.
return err
}
}
return nil
}
func (w *inotify) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, w.watches.len())
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
return entries
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *inotify) readEvents() {
defer func() {
close(w.doneResp)
close(w.Errors)
close(w.Events)
}()
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
for {
if w.isClosed() {
return
}
n, err := w.inotifyFile.Read(buf[:])
if err != nil {
if errors.Is(err, os.ErrClosed) {
return
}
if !w.sendError(err) {
return
}
continue
}
if n < unix.SizeofInotifyEvent {
err := errors.New("notify: short read in readEvents()") // Read was too short.
if n == 0 {
err = io.EOF // If EOF is received. This should really never happen.
}
if !w.sendError(err) {
return
}
continue
}
// We don't know how many events we just read into the buffer. Process
// events while the offset points to at least one whole event.
var offset uint32
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point to the event in the buffer.
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
if !w.sendError(ErrEventOverflow) {
return
}
}
ev, ok := w.handleEvent(inEvent, &buf, offset)
if !ok {
return
}
if !w.sendEvent(ev) {
return
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + inEvent.Len
}
}
}
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
w.mu.Lock()
defer w.mu.Unlock()
/// If the event happened to the watched directory or the watched file, the
/// kernel doesn't append the filename to the event, but we would like to
/// always fill the "Name" field with a valid filename. We retrieve the
/// path of the watch from the "paths" map.
///
/// Can be nil if Remove() was called in another goroutine for this path
/// inbetween reading the events from the kernel and reading the internal
/// state. Not much we can do about it, so just skip. See #616.
watch := w.watches.byWd(uint32(inEvent.Wd))
if watch == nil {
return Event{}, true
}
var (
name = watch.path
nameLen = uint32(inEvent.Len)
)
if nameLen > 0 {
/// Point "bytes" at the first byte of the filename
bb := *buf
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
}
if debug {
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
}
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
w.watches.remove(watch)
return Event{}, true
}
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch)
}
// We can't really update the state when a watched path is moved; only
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
if watch.recurse { // Do nothing
return Event{}, true
}
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return Event{}, false
}
}
}
/// Skip if we're watching both this path and the parent; the parent will
/// already send a delete so no need to do it twice.
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
_, ok := w.watches.path[filepath.Dir(watch.path)]
if ok {
return Event{}, true
}
}
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
// Need to update watch path for recurse.
if watch.recurse {
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
/// New directory created: set up watch on it.
if isDir && ev.Has(Create) {
err := w.register(ev.Name, watch.flags, true)
if !w.sendError(err) {
return Event{}, false
}
// This was a directory rename, so we need to update all the
// children.
//
// TODO: this is of course pretty slow; we should use a better data
// structure for storing all of this, e.g. store children in the
// watch. I have some code for this in my kqueue refactor we can use
// in the future. For now I'm okay with this as it's not publicly
// available. Correctness first, performance second.
if ev.renamedFrom != "" {
for k, ww := range w.watches.wd {
if k == watch.wd || ww.path == ev.Name {
continue
}
if strings.HasPrefix(ww.path, ev.renamedFrom) {
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
w.watches.wd[k] = ww
}
}
}
}
}
return ev, true
}
func (w *inotify) isRecursive(path string) bool {
ww := w.watches.byPath(path)
if ww == nil { // path could be a file, so also check the Dir.
ww = w.watches.byPath(filepath.Dir(path))
}
return ww != nil && ww.recurse
}
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_OPEN == unix.IN_OPEN {
e.Op |= xUnportableOpen
}
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
e.Op |= xUnportableRead
}
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
e.Op |= xUnportableCloseWrite
}
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
e.Op |= xUnportableCloseRead
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
if cookie != 0 {
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
w.cookiesMu.Lock()
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
w.cookieIndex++
if w.cookieIndex > 9 {
w.cookieIndex = 0
}
w.cookiesMu.Unlock()
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
w.cookiesMu.Lock()
var prev string
for _, c := range w.cookies {
if c.cookie == cookie {
prev = c.path
break
}
}
w.cookiesMu.Unlock()
e.renamedFrom = prev
}
}
return e
}
func (w *inotify) xSupports(op Op) bool {
return true // Supports everything.
}
func (w *inotify) state() {
w.mu.Lock()
defer w.mu.Unlock()
for wd, ww := range w.watches.wd {
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
}
}


@@ -1,705 +0,0 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"time"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
type kqueue struct {
*shared
Events chan Event
Errors chan error
kq int // File descriptor (as returned by the kqueue() syscall).
closepipe [2]int // Pipe used for closing kq.
watches *watches
}
type (
watches struct {
mu sync.RWMutex
wd map[int]watch // wd → watch
path map[string]int // pathname → wd
byDir map[string]map[int]struct{} // dirname(path) → wd
seen map[string]struct{} // Keep track of if we know this file exists.
byUser map[string]struct{} // Watches added with Watcher.Add()
}
watch struct {
wd int
name string
linkName string // In case of links; name is the target, and this is the link.
isDir bool
dirFlags uint32
}
)
func newWatches() *watches {
return &watches{
wd: make(map[int]watch),
path: make(map[string]int),
byDir: make(map[string]map[int]struct{}),
seen: make(map[string]struct{}),
byUser: make(map[string]struct{}),
}
}
func (w *watches) listPaths(userOnly bool) []string {
w.mu.RLock()
defer w.mu.RUnlock()
if userOnly {
l := make([]string, 0, len(w.byUser))
for p := range w.byUser {
l = append(l, p)
}
return l
}
l := make([]string, 0, len(w.path))
for p := range w.path {
l = append(l, p)
}
return l
}
func (w *watches) watchesInDir(path string) []string {
w.mu.RLock()
defer w.mu.RUnlock()
l := make([]string, 0, 4)
for fd := range w.byDir[path] {
info := w.wd[fd]
if _, ok := w.byUser[info.name]; !ok {
l = append(l, info.name)
}
}
return l
}
// Mark path as added by the user.
func (w *watches) addUserWatch(path string) {
w.mu.Lock()
defer w.mu.Unlock()
w.byUser[path] = struct{}{}
}
func (w *watches) addLink(path string, fd int) {
w.mu.Lock()
defer w.mu.Unlock()
w.path[path] = fd
w.seen[path] = struct{}{}
}
func (w *watches) add(path, linkPath string, fd int, isDir bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.path[path] = fd
w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
parent := filepath.Dir(path)
byDir, ok := w.byDir[parent]
if !ok {
byDir = make(map[int]struct{}, 1)
w.byDir[parent] = byDir
}
byDir[fd] = struct{}{}
}
func (w *watches) byWd(fd int) (watch, bool) {
w.mu.RLock()
defer w.mu.RUnlock()
info, ok := w.wd[fd]
return info, ok
}
func (w *watches) byPath(path string) (watch, bool) {
w.mu.RLock()
defer w.mu.RUnlock()
info, ok := w.wd[w.path[path]]
return info, ok
}
func (w *watches) updateDirFlags(path string, flags uint32) bool {
w.mu.Lock()
defer w.mu.Unlock()
fd, ok := w.path[path]
if !ok { // Already deleted: don't re-set it here.
return false
}
info := w.wd[fd]
info.dirFlags = flags
w.wd[fd] = info
return true
}
func (w *watches) remove(fd int, path string) bool {
w.mu.Lock()
defer w.mu.Unlock()
isDir := w.wd[fd].isDir
delete(w.path, path)
delete(w.byUser, path)
parent := filepath.Dir(path)
delete(w.byDir[parent], fd)
if len(w.byDir[parent]) == 0 {
delete(w.byDir, parent)
}
delete(w.wd, fd)
delete(w.seen, path)
return isDir
}
func (w *watches) markSeen(path string, exists bool) {
w.mu.Lock()
defer w.mu.Unlock()
if exists {
w.seen[path] = struct{}{}
} else {
delete(w.seen, path)
}
}
func (w *watches) seenBefore(path string) bool {
w.mu.RLock()
defer w.mu.RUnlock()
_, ok := w.seen[path]
return ok
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
w := &kqueue{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
kq: kq,
closepipe: closepipe,
watches: newWatches(),
}
go w.readEvents()
return w, nil
}
// newKqueue creates a new kernel event queue and returns a descriptor.
//
// This registers a new event on closepipe, which will trigger an event when
// it's closed. This way we can use kevent() without timeout/polling; without
// the closepipe, it would block forever and we wouldn't be able to stop it at
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
kq, err = unix.Kqueue()
if err != nil {
return kq, closepipe, err
}
// Create the close pipe.
err = unix.Pipe(closepipe[:])
if err != nil {
unix.Close(kq)
return kq, closepipe, err
}
unix.CloseOnExec(closepipe[0])
unix.CloseOnExec(closepipe[1])
// Register changes to listen on the closepipe.
changes := make([]unix.Kevent_t, 1)
// SetKevent converts int to the platform-specific types.
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
ok, err := unix.Kevent(kq, changes, nil, nil)
if ok == -1 {
unix.Close(kq)
unix.Close(closepipe[0])
unix.Close(closepipe[1])
return kq, closepipe, err
}
return kq, closepipe, nil
}
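
As an aside, the closepipe trick described above can be shown in isolation. A minimal sketch, assuming only golang.org/x/sys/unix and a BSD/macOS build: registering the read end of a pipe on the kqueue lets a Kevent call that blocks with no timeout be woken simply by closing the write end, which is exactly what (*kqueue).Close below relies on.

//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		panic(err)
	}
	defer unix.Close(kq)

	var pipe [2]int
	if err := unix.Pipe(pipe[:]); err != nil {
		panic(err)
	}

	// Watch the read end: it becomes readable as soon as the write end closes.
	changes := make([]unix.Kevent_t, 1)
	unix.SetKevent(&changes[0], pipe[0], unix.EVFILT_READ,
		unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
	if _, err := unix.Kevent(kq, changes, nil, nil); err != nil {
		panic(err)
	}

	go unix.Close(pipe[1]) // "quit" signal, as in (*kqueue).Close below

	events := make([]unix.Kevent_t, 1)
	n, err := unix.Kevent(kq, nil, events, nil) // blocks until the pipe closes
	if err != nil {
		panic(err)
	}
	fmt.Println("kevent returned", n, "event(s); reader can shut down")
}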
func (w *kqueue) Close() error {
if w.shared.close() {
return nil
}
pathsToRemove := w.watches.listPaths(false)
for _, name := range pathsToRemove {
w.Remove(name)
}
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
return nil
}
func (w *kqueue) Add(name string) error { return w.AddWith(name) }
func (w *kqueue) AddWith(name string, opts ...addOpt) error {
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
_, err := w.addWatch(name, noteAllEvents, false)
if err != nil {
return err
}
w.watches.addUserWatch(name)
return nil
}
func (w *kqueue) Remove(name string) error {
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
return w.remove(name, true)
}
func (w *kqueue) remove(name string, unwatchFiles bool) error {
if w.isClosed() {
return nil
}
name = filepath.Clean(name)
info, ok := w.watches.byPath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
if err != nil {
return err
}
unix.Close(info.wd)
isDir := w.watches.remove(info.wd, name)
// Find all watched paths that are in this directory that are not external.
if unwatchFiles && isDir {
pathsToRemove := w.watches.watchesInDir(name)
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
// path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
func (w *kqueue) WatchList() []string {
if w.isClosed() {
return nil
}
return w.watches.listPaths(true)
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// addWatch adds name to the watched file set; the flags are interpreted as
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
if w.isClosed() {
return "", ErrClosed
}
name = filepath.Clean(name)
info, alreadyWatching := w.watches.byPath(name)
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets or named pipes.
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
return "", nil
}
// Follow symlinks, but only for paths added with Add(), and not paths
// we're adding from internalWatch from a listdir.
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
return "", err
}
if !filepath.IsAbs(link) {
link = filepath.Join(filepath.Dir(name), link)
}
_, alreadyWatching = w.watches.byPath(link)
if alreadyWatching {
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
w.watches.addLink(name, 0)
return link, nil
}
info.linkName = name
name = link
fi, err = os.Lstat(name)
if err != nil {
return "", err
}
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and Go issues 11180 and 39237.
for {
info.wd, err = unix.Open(name, openMode, 0)
if err == nil {
break
}
if errors.Is(err, unix.EINTR) {
continue
}
return "", err
}
info.isDir = fi.IsDir()
}
err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
if err != nil {
unix.Close(info.wd)
return "", err
}
if !alreadyWatching {
w.watches.add(name, info.linkName, info.wd, info.isDir)
}
// Watch the directory if it has not been watched before, or if it was
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
if info.isDir {
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
if !w.watches.updateDirFlags(name, flags) {
return "", nil
}
if watchDir {
d := name
if info.linkName != "" {
d = info.linkName
}
if err := w.watchDirectoryFiles(d); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *kqueue) readEvents() {
defer func() {
close(w.Events)
close(w.Errors)
_ = unix.Close(w.kq)
unix.Close(w.closepipe[0])
}()
eventBuffer := make([]unix.Kevent_t, 10)
for {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
return
}
}
for _, kevent := range kevents {
var (
wd = int(kevent.Ident)
mask = uint32(kevent.Fflags)
)
// Shut down the loop when the pipe is closed, but only after all
// other events have been processed.
if wd == w.closepipe[0] {
return
}
path, ok := w.watches.byWd(wd)
if debug {
internal.Debug(path.name, &kevent)
}
// On macOS it seems that sometimes an event with Ident=0 is
// delivered, and no other flags/information beyond that, even
// though we never saw such a file descriptor. For example in
// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
//
// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
//
// The first is a normal event, the second with Ident 0. No error
// flag, no data, no ... nothing.
//
// I read a bit through bsd/kern_event.c from the xnu source, but I
// don't really see an obvious location where this is triggered;
// it doesn't seem intentional, but it's not clear.
//
// Technically fd 0 is a valid descriptor, so only skip it if
// there's no path, and if we're on macOS.
if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
continue
}
event := w.newEvent(path.name, path.linkName, mask)
if event.Has(Rename) || event.Has(Remove) {
w.remove(event.Name, false)
w.watches.markSeen(event.Name, false)
}
if path.isDir && event.Has(Write) && !event.Has(Remove) {
w.dirChange(event.Name)
} else if !w.sendEvent(event) {
return
}
if event.Has(Remove) {
// Look for a file that may have overwritten this; for example,
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
_, found := w.watches.byPath(fileDir)
if found {
// TODO: this branch is never triggered in any test.
// Added in d6220df (2012).
// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
//
// I don't really get how this can be triggered either.
// And it wasn't triggered in the patch that added it,
// either.
//
// Original also had a comment:
// make sure the directory exists before we watch for
// changes. When we do a recursive watch and perform
// rm -rf, the parent directory might have gone
// missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the
// parent directory.
err := w.dirChange(fileDir)
if !w.sendError(err) {
return
}
}
} else {
path := filepath.Clean(event.Name)
if fi, err := os.Lstat(path); err == nil {
err := w.sendCreateIfNew(path, fi)
if !w.sendError(err) {
return
}
}
}
}
}
}
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
e := Event{Name: name}
if linkName != "" {
// If the user watched "/path/link" then emit events as "/path/link"
// rather than "/path/target".
e.Name = linkName
}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
// No point sending a write and delete event at the same time: if it's gone,
// then it's gone.
if e.Op.Has(Write) && e.Op.Has(Remove) {
e.Op &^= Write
}
return e
}
// watchDirectoryFiles watches the files inside a directory to mimic inotify when adding a watch on a directory.
func (w *kqueue) watchDirectoryFiles(dirPath string) error {
files, err := os.ReadDir(dirPath)
if err != nil {
return err
}
for _, f := range files {
path := filepath.Join(dirPath, f.Name())
fi, err := f.Info()
if err != nil {
return fmt.Errorf("%q: %w", path, err)
}
cleanPath, err := w.internalWatch(path, fi)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
// as a "new" file later (it still shows up in the directory
// listing).
switch {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
return fmt.Errorf("%q: %w", path, err)
}
}
w.watches.markSeen(cleanPath, true)
}
return nil
}
// Search the directory for new files and send an event for them.
//
// This functionality is to have the BSD watcher match inotify, which sends
// a create event for files created in a watched directory.
func (w *kqueue) dirChange(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
// Directory no longer exists: we can ignore this safely. kqueue will
// still give us the correct events.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
}
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
}
}
return nil
}
// Send a create event if the file isn't already being tracked, and start
// watching this file.
func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
if !w.watches.seenBefore(path) {
if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
// Like watchDirectoryFiles, but without doing another ReadDir.
path, err := w.internalWatch(path, fi)
if err != nil {
return err
}
w.watches.markSeen(path, true)
return nil
}
func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if we're already watching the subdirectory
info, _ := w.watches.byPath(name)
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
}
// Watch file to mimic Linux inotify.
return w.addWatch(name, noteAllEvents, true)
}
// Register events with the queue.
func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types.
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// Register the events.
success, err := unix.Kevent(w.kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(w.kq, nil, events, nil)
if err != nil {
return nil, err
}
return events[0:n], nil
}
func (w *kqueue) xSupports(op Op) bool {
//if runtime.GOOS == "freebsd" {
// return true // Supports everything.
//}
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false
}
return true
}


@@ -1,22 +0,0 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
package fsnotify
import "errors"
type other struct {
Events chan Event
Errors chan error
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
func (w *other) Close() error { return nil }
func (w *other) WatchList() []string { return nil }
func (w *other) Add(name string) error { return nil }
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
func (w *other) Remove(name string) error { return nil }
func (w *other) xSupports(op Op) bool { return false }
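
Because this stub backend makes NewWatcher fail immediately on unsupported platforms, callers should treat watcher construction as optional. A minimal sketch of degrading gracefully, assuming the public API:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		// On unsupported platforms (or appengine builds) construction fails
		// right away; fall back to polling or disable live reload.
		log.Printf("file watching unavailable: %v", err)
		return
	}
	defer w.Close()
	log.Println("watching enabled")
}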


@@ -1,680 +0,0 @@
//go:build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
"unsafe"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/windows"
)
type readDirChangesW struct {
Events chan Event
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
done chan chan<- error
mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number)
closed bool // Set to true when Close() is first called
}
var defaultBufferSize = 50
func newBackend(ev chan Event, errs chan error) (backend, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
}
w := &readDirChangesW{
Events: ev,
Errors: errs,
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
done: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
func (w *readDirChangesW) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
if mask == 0 {
return false
}
event := w.newEvent(name, uint32(mask))
event.renamedFrom = renamedFrom
select {
case ch := <-w.done:
w.done <- ch
case w.Events <- event:
}
return true
}
// Returns true if the error was sent, or false if the watcher is closed.
func (w *readDirChangesW) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *readDirChangesW) Close() error {
if w.isClosed() {
return nil
}
w.mu.Lock()
w.closed = true
w.mu.Unlock()
// Send "done" message to the reader goroutine
ch := make(chan error)
w.done <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
bufsize: with.bufsize,
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
func (w *readDirChangesW) Remove(name string) error {
if w.isClosed() {
return nil
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
}
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
func (w *readDirChangesW) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
for name := range watchEntry.names {
entries = append(entries, filepath.Join(watchEntry.path, name))
}
// the directory itself is being watched
if watchEntry.mask != 0 {
entries = append(entries, watchEntry.path)
}
}
}
return entries
}
// These options are from the old golang.org/x/exp/winfsnotify, where you could
// add various options to the watch. This has long since been removed.
//
// The "sys" in the name is misleading as they're not part of any "system".
//
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const (
sysFSALLEVENTS = 0xfff
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
sysFSIGNORED = 0x8000
)
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
bufsize int
reply chan error
}
type inode struct {
handle windows.Handle
volume uint32
index uint64
}
type watch struct {
ov windows.Overlapped
ino *inode // i-number
recurse bool // Recursive watch?
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf []byte // buffer, allocated later
}
type (
indexMap map[uint64]*watch
watchMap map[uint32]indexMap
)
func (w *readDirChangesW) wakeupReader() error {
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if err != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", err)
}
return nil
}
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
if err != nil {
return "", os.NewSyscallError("GetFileAttributes", err)
}
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
windows.FILE_LIST_DIRECTORY,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
nil, windows.OPEN_EXISTING,
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
if err != nil {
return nil, os.NewSyscallError("CreateFile", err)
}
var fi windows.ByHandleFileInformation
err = windows.GetFileInformationByHandle(h, &fi)
if err != nil {
windows.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
return err
}
ino, err := w.getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
if err != nil {
windows.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", err)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
recurse: recurse,
buf: make([]byte, bufsize),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
windows.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
err = w.startRead(watchEntry)
if err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *readDirChangesW) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
return err
}
ino, err := w.getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
// Check for nil before touching watch.recurse below.
if watch == nil {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
}
if recurse && !watch.recurse {
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
}
err = windows.CloseHandle(ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
}
if pathname == dir {
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *readDirChangesW) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *readDirChangesW) startRead(watch *watch) error {
err := windows.CancelIo(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CancelIo", err))
w.deleteWatch(watch)
}
mask := w.toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= w.toWindowsFlags(m)
}
if mask == 0 {
err := windows.CloseHandle(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
// We need to pass the array, rather than the slice.
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
watch.recurse, mask, nil, &watch.ov, 0)
if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *readDirChangesW) readEvents() {
var (
n uint32
key uintptr
ov *windows.Overlapped
)
runtime.LockOSThread()
for {
// This error is handled after the watch == nil check below.
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.done:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
err := windows.CloseHandle(w.port)
if err != nil {
err = os.NewSyscallError("CloseHandle", err)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch qErr {
case nil:
// No error
case windows.ERROR_MORE_DATA:
if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case windows.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case windows.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
continue
}
var offset uint32
for {
if n == 0 {
w.sendError(ErrEventOverflow)
break
}
// Point "raw" to the event in the buffer
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
// Create a buf that is the size of the path name
size := int(raw.FileNameLength / 2)
var buf []uint16
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
sh.Len = size
sh.Cap = size
name := windows.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
if debug {
internal.Debug(fullname, raw.Action)
}
var mask uint64
switch raw.Action {
case windows.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case windows.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case windows.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case windows.FILE_ACTION_RENAMED_NEW_NAME:
// Update saved path of all sub-watches.
old := filepath.Join(watch.path, watch.rename)
w.mu.Lock()
for _, watchMap := range w.watches {
for _, ww := range watchMap {
if strings.HasPrefix(ww.path, old) {
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
}
}
}
w.mu.Unlock()
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
w.sendEvent(fullname, "", watch.names[name]&mask)
}
if raw.Action == windows.FILE_ACTION_REMOVED {
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
} else {
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
}
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
if err := w.startRead(watch); err != nil {
w.sendError(err)
}
}
}
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
switch action {
case windows.FILE_ACTION_ADDED:
return sysFSCREATE
case windows.FILE_ACTION_REMOVED:
return sysFSDELETE
case windows.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case windows.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case windows.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}
func (w *readDirChangesW) xSupports(op Op) bool {
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false
}
return true
}
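
The AddWith path above rejects buffers smaller than 4096 bytes and otherwise sizes the per-watch ReadDirectoryChangesW buffer from the option. A minimal sketch, assuming the WithBufferSize option exported by recent fsnotify releases and a hypothetical C:\data\inbox directory:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// A larger buffer lowers the chance of overflow (ERROR_MORE_DATA) on
	// busy directories; anything below 4096 bytes is rejected.
	err = w.AddWith(`C:\data\inbox`, fsnotify.WithBufferSize(256<<10))
	if err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		log.Println(ev)
	}
}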

Some files were not shown because too many files have changed in this diff.