From 550bd8a218224e6c73201f444387d66a299f438b Mon Sep 17 00:00:00 2001
From: zhangqian <zhangqian@123.com>
Date: Tue, 15 Aug 2023 19:56:35 +0800
Subject: [PATCH] Consume schedule tasks, store the task data, and expose a task list to the frontend
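
Consume schedule tasks pushed over NSQ, persist them in a local SQLite
database, and expose a paginated task-list endpoint so the frontend can
render the tasks. Concretely: add a reusable pkg/nsqclient package
(connection, channel, pool, producer, consumer); subscribe to the
per-node schedule-task topic; store each delivered task in the new
schedule_task table (aps.db via gorm's SQLite driver); and serve
GET /v1/task/list with page/pageSize parameters. The Swagger docs are
regenerated to match.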
---
pkg/nsqclient/pointer.go | 57 +
model/schedule_task.go | 260 +++++
.gitignore | 3
pkg/nsqclient/README.md | 54 +
model/procedures.go | 230 ++++
go.mod | 7
docs/swagger.yaml | 120 -
model/request/task.go | 5
pkg/contextx/contextx.go | 13
pkg/nsqclient/cmd/main.go | 141 ++
pkg/nsqclient/cmd/make.sh | 10
model/request/schedule_task.go | 50 +
nsq/model.go | 52 +
pkg/nsqclient/cmd/TST/test/test.go | 78 +
nsq/nsq.go | 24
pkg/structx/structx.go | 16
service/task.go | 27
nsq/msg_handler.go | 51 +
logs/apsClient.info.log | 148 +++
api/v1/notice.go | 2
pkg/nsqclient/consumer.go | 99 ++
model/request/user.go | 8
logs/apsClient.err.log | 35
docs/docs.go | 160 +--
nsq/consumer.go | 37
nsq/producer.go | 60 +
api/v1/task.go | 47
docs/swagger.json | 160 +--
pkg/nsqclient/conn.go | 45
constvar/const.go | 12
model/sqlite.go | 1
/dev/null | 271 -----
model/materials.go | 94 -
go.sum | 14
pkg/nsqclient/producer.go | 139 ++
model/index.go | 9
pkg/nsqclient/cmd/TST/ctest/ctest.cpp | 101 ++
conf/config.go | 12
conf/apsClient.json | 14
router/index.go | 6
main.go | 6
pkg/sqlitex/sqlitex.go | 57 +
pkg/nsqclient/channel.go | 134 ++
pkg/nsqclient/pool.go | 25
 44 files changed, 2270 insertions(+), 624 deletions(-)
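Reviewer notes (kept below the diffstat, where git-am ignores them): the
sketch below is a minimal, hypothetical usage example, not code from this
patch. The topic format comes from constvar.NsqTopicScheduleTask, the
NodeId value from conf/apsClient.json, and the listen address is a
placeholder.

    package main

    import (
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        // Per-node NSQ topic: constvar.NsqTopicScheduleTask ("aps.%v.scheduleTask")
        // formatted with the configured nsqConf.NodeId.
        topic := fmt.Sprintf("aps.%v.scheduleTask", "wangpengfei")
        fmt.Println("consumer topic:", topic) // aps.wangpengfei.scheduleTask

        // Paginated task list, mirroring the requests in logs/apsClient.info.log.
        // Host and port are placeholders for wherever apsClient listens.
        resp, err := http.Get("http://127.0.0.1:8080/v1/task/list?page=1&pageSize=20")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, _ := io.ReadAll(resp.Body)
        fmt.Println(string(body)) // contextx.Response JSON: code, msg, and the task list in data
    }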
diff --git a/.gitignore b/.gitignore
index 10e3992..5f530a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,4 +21,5 @@
*.exe
*.test
-apsClient
\ No newline at end of file
+apsClient
+aps.db
\ No newline at end of file
diff --git a/api/v1/notice.go b/api/v1/notice.go
index cd3bed8..53b7a01 100644
--- a/api/v1/notice.go
+++ b/api/v1/notice.go
@@ -17,7 +17,7 @@
// @Summary Task start notification
// @Produce application/json
// @Param object body request.TaskInfo true "query parameters"
-// @Success 200 {object} contextx.Response{data=response.LoginResponse} "success"
+// @Success 200 {object} contextx.Response{} "success"
// @Router /v1/notice/task/start [post]
func (slf *NoticeApi) TaskStart(c *gin.Context) {
var params request.TaskInfo
diff --git a/api/v1/task.go b/api/v1/task.go
new file mode 100644
index 0000000..196ee69
--- /dev/null
+++ b/api/v1/task.go
@@ -0,0 +1,47 @@
+package v1
+
+import (
+ "apsClient/model/request"
+ _ "apsClient/model/response"
+ "apsClient/pkg/contextx"
+ "apsClient/pkg/ecode"
+ "apsClient/service"
+ "encoding/json"
+ "github.com/gin-gonic/gin"
+)
+
+type TaskApi struct{}
+
+// TaskList
+// @Tags Base
+// @Summary Task list
+// @Produce application/json
+// @Param object query request.TaskList true "query parameters"
+// @Success 200 {object} contextx.Response{data=[]model.ScheduleTask} "success"
+// @Router /v1/task/list [get]
+func (slf *TaskApi) TaskList(c *gin.Context) {
+ var params request.TaskList
+ ctx, ok := contextx.NewContext(c, &params)
+ if !ok {
+ return
+ }
+
+ taskList, total, code := service.NewTaskService().GetTaskList(params.Page, params.PageSize)
+ if code != ecode.OK {
+ ctx.Fail(code)
+ return
+ }
+
+ for _, task := range taskList {
+ if task.Data != "" {
+ err := json.Unmarshal([]byte(task.Data), &task.TaskInfo)
+ if err != nil {
+ ctx.Fail(ecode.UnknownErr)
+ return
+ }
+ }
+
+ }
+
+ ctx.ResultList(taskList, total)
+}
diff --git a/conf/apsClient.json b/conf/apsClient.json
index c5d81ae..aaaf003 100644
--- a/conf/apsClient.json
+++ b/conf/apsClient.json
@@ -21,6 +21,14 @@
"connMaxLifeTimeSecond": 120,
"connMaxIdleTimeSecond": 3600
},
+ "sqlite": {
+ "dsn": "aps.db",
+ "logMode": true,
+ "maxIdleCon": 50,
+ "maxOpenCon": 200,
+ "connMaxLifeTimeSecond": 120,
+ "connMaxIdleTimeSecond": 3600
+ },
"redis": {
"host": "127.0.0.1",
"port": 6379,
@@ -65,7 +73,13 @@
},
"Services":{
"apsServer": "http://127.0.0.1:9081"
+ },
+ "nsqConf": {
+ "NodeId": "wangpengfei",
+ "nsqdAddr": "121.31.232.83:4150",
+ "nsqlookupdAddr":""
}
+
}
diff --git a/conf/config.go b/conf/config.go
index 44b9eab..60569a2 100644
--- a/conf/config.go
+++ b/conf/config.go
@@ -5,6 +5,7 @@
"apsClient/pkg/logx"
"apsClient/pkg/mysqlx"
"apsClient/pkg/redisx"
+ "apsClient/pkg/sqlitex"
"flag"
"github.com/spf13/viper"
"log"
@@ -80,6 +81,12 @@
ApsServer string
}
+ nsqConf struct {
+ NodeId string
+ NsqdAddr string
+ NsqlookupdAddr string
+ }
+
config struct {
// System configuration
System System
@@ -89,6 +96,9 @@
// MySQL configuration
Mysql mysqlx.Conf
+
+ // SQLite configuration
+ Sqlite sqlitex.Conf
// Redis configuration
Redis redisx.Conf
@@ -110,6 +120,8 @@
//Services Address
Services Services
+
+ NsqConf nsqConf
}
)
diff --git a/constvar/const.go b/constvar/const.go
index 00642ca..8e487a3 100644
--- a/constvar/const.go
+++ b/constvar/const.go
@@ -1,15 +1,5 @@
package constvar
-type UserStatus int
-
-type UserType int
-
const (
- UserTypeSuper UserType = iota + 1 // super administrator
- UserTypePrimary // primary account
- UserTypeSub // sub-account
-)
-
-const (
- ApsServerHost = ""
+ NsqTopicScheduleTask = "aps.%v.scheduleTask" // schedule task dispatch
)
diff --git a/docs/docs.go b/docs/docs.go
index 40f438e..be67e6e 100644
--- a/docs/docs.go
+++ b/docs/docs.go
@@ -40,6 +40,39 @@
"200": {
"description": "鎴愬姛",
"schema": {
+ "$ref": "#/definitions/contextx.Response"
+ }
+ }
+ }
+ }
+ },
+ "/v1/task/list": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Base"
+ ],
+ "summary": "浠诲姟寮�鍚�氱煡",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "椤电爜",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "姣忛〉澶у皬",
+ "name": "pageSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "鎴愬姛",
+ "schema": {
"allOf": [
{
"$ref": "#/definitions/contextx.Response"
@@ -48,7 +81,10 @@
"type": "object",
"properties": {
"data": {
- "$ref": "#/definitions/response.LoginResponse"
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/model.ScheduleTask"
+ }
}
}
}
@@ -60,24 +96,6 @@
}
},
"definitions": {
- "constvar.UserType": {
- "type": "integer",
- "enum": [
- 1,
- 2,
- 3
- ],
- "x-enum-comments": {
- "UserTypePrimary": "涓昏处鎴�",
- "UserTypeSub": "瀛愯处鎴�",
- "UserTypeSuper": "瓒呯骇绠$悊鍛�"
- },
- "x-enum-varnames": [
- "UserTypeSuper",
- "UserTypePrimary",
- "UserTypeSub"
- ]
- },
"contextx.Response": {
"type": "object",
"properties": {
@@ -90,85 +108,39 @@
}
}
},
- "model.User": {
+ "model.ScheduleTask": {
"type": "object",
"properties": {
- "companyCity": {
+ "amount": {
+ "description": "Parameter string ` + "`" + `json:\"parameter\"` + "`" + ` //浜у搧鍚嶇О\nCustomer string ` + "`" + `json:\"customer\"` + "`" + `\nDeliverDate string ` + "`" + `json:\"deliverDate\"` + "`" + `\nOrderAttr string ` + "`" + `json:\"orderAttr\"` + "`" + `",
+ "type": "number"
+ },
+ "data": {
+ "description": "鎺掔▼浠诲姟json涓�",
"type": "string"
},
- "companyContact": {
- "type": "string"
- },
- "companyEmail": {
- "type": "string"
- },
- "companyLogo": {
- "type": "string"
- },
- "companyName": {
- "type": "string"
- },
- "companyProvince": {
- "type": "string"
- },
- "companyTrade": {
- "type": "string"
- },
- "createAt": {
- "description": "鍒涘缓鏃堕棿",
- "type": "string"
- },
- "enable": {
- "type": "boolean"
- },
- "headerImage": {
- "type": "string"
- },
- "id": {
- "type": "string"
- },
- "ip": {
- "type": "string"
- },
- "menuIds": {
- "description": "鑿滃崟ID鍒楄〃",
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- "nickName": {
- "type": "string"
- },
- "parentId": {
- "type": "string"
- },
- "parentName": {
- "type": "string"
- },
- "phone": {
- "type": "string"
- },
- "port": {
- "type": "string"
- },
- "pos": {
- "type": "string"
- },
- "status": {
+ "endTime": {
"type": "integer"
},
- "systemName": {
+ "id": {
+ "type": "integer"
+ },
+ "orderId": {
+ "description": "璁㈠崟id",
"type": "string"
},
- "updateAt": {
- "description": "鏇存柊鏃堕棿",
+ "productId": {
+ "description": "璁㈠崟id",
"type": "string"
},
- "userType": {
- "$ref": "#/definitions/constvar.UserType"
+ "productName": {
+ "description": "浜у搧鍚嶇О",
+ "type": "string"
},
- "username": {
+ "startTime": {
+ "type": "integer"
+ },
+ "unit": {
"type": "string"
}
}
@@ -195,20 +167,6 @@
"workOrder": {
"description": "宸ュ崟",
"type": "string"
- }
- }
- },
- "response.LoginResponse": {
- "type": "object",
- "properties": {
- "expiresAt": {
- "type": "integer"
- },
- "token": {
- "type": "string"
- },
- "user": {
- "$ref": "#/definitions/model.User"
}
}
}
diff --git a/docs/swagger.json b/docs/swagger.json
index 66654ce..bba27ba 100644
--- a/docs/swagger.json
+++ b/docs/swagger.json
@@ -28,6 +28,39 @@
"200": {
"description": "鎴愬姛",
"schema": {
+ "$ref": "#/definitions/contextx.Response"
+ }
+ }
+ }
+ }
+ },
+ "/v1/task/list": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Base"
+ ],
+ "summary": "浠诲姟寮�鍚�氱煡",
+ "parameters": [
+ {
+ "type": "integer",
+ "description": "椤电爜",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "姣忛〉澶у皬",
+ "name": "pageSize",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "鎴愬姛",
+ "schema": {
"allOf": [
{
"$ref": "#/definitions/contextx.Response"
@@ -36,7 +69,10 @@
"type": "object",
"properties": {
"data": {
- "$ref": "#/definitions/response.LoginResponse"
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/model.ScheduleTask"
+ }
}
}
}
@@ -48,24 +84,6 @@
}
},
"definitions": {
- "constvar.UserType": {
- "type": "integer",
- "enum": [
- 1,
- 2,
- 3
- ],
- "x-enum-comments": {
- "UserTypePrimary": "涓昏处鎴�",
- "UserTypeSub": "瀛愯处鎴�",
- "UserTypeSuper": "瓒呯骇绠$悊鍛�"
- },
- "x-enum-varnames": [
- "UserTypeSuper",
- "UserTypePrimary",
- "UserTypeSub"
- ]
- },
"contextx.Response": {
"type": "object",
"properties": {
@@ -78,85 +96,39 @@
}
}
},
- "model.User": {
+ "model.ScheduleTask": {
"type": "object",
"properties": {
- "companyCity": {
+ "amount": {
+ "description": "Parameter string `json:\"parameter\"` //浜у搧鍚嶇О\nCustomer string `json:\"customer\"`\nDeliverDate string `json:\"deliverDate\"`\nOrderAttr string `json:\"orderAttr\"`",
+ "type": "number"
+ },
+ "data": {
+ "description": "鎺掔▼浠诲姟json涓�",
"type": "string"
},
- "companyContact": {
- "type": "string"
- },
- "companyEmail": {
- "type": "string"
- },
- "companyLogo": {
- "type": "string"
- },
- "companyName": {
- "type": "string"
- },
- "companyProvince": {
- "type": "string"
- },
- "companyTrade": {
- "type": "string"
- },
- "createAt": {
- "description": "鍒涘缓鏃堕棿",
- "type": "string"
- },
- "enable": {
- "type": "boolean"
- },
- "headerImage": {
- "type": "string"
- },
- "id": {
- "type": "string"
- },
- "ip": {
- "type": "string"
- },
- "menuIds": {
- "description": "鑿滃崟ID鍒楄〃",
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- "nickName": {
- "type": "string"
- },
- "parentId": {
- "type": "string"
- },
- "parentName": {
- "type": "string"
- },
- "phone": {
- "type": "string"
- },
- "port": {
- "type": "string"
- },
- "pos": {
- "type": "string"
- },
- "status": {
+ "endTime": {
"type": "integer"
},
- "systemName": {
+ "id": {
+ "type": "integer"
+ },
+ "orderId": {
+ "description": "璁㈠崟id",
"type": "string"
},
- "updateAt": {
- "description": "鏇存柊鏃堕棿",
+ "productId": {
+ "description": "璁㈠崟id",
"type": "string"
},
- "userType": {
- "$ref": "#/definitions/constvar.UserType"
+ "productName": {
+ "description": "浜у搧鍚嶇О",
+ "type": "string"
},
- "username": {
+ "startTime": {
+ "type": "integer"
+ },
+ "unit": {
"type": "string"
}
}
@@ -183,20 +155,6 @@
"workOrder": {
"description": "宸ュ崟",
"type": "string"
- }
- }
- },
- "response.LoginResponse": {
- "type": "object",
- "properties": {
- "expiresAt": {
- "type": "integer"
- },
- "token": {
- "type": "string"
- },
- "user": {
- "$ref": "#/definitions/model.User"
}
}
}
diff --git a/docs/swagger.yaml b/docs/swagger.yaml
index 6f04fa0..d7c6fa3 100644
--- a/docs/swagger.yaml
+++ b/docs/swagger.yaml
@@ -1,18 +1,4 @@
definitions:
- constvar.UserType:
- enum:
- - 1
- - 2
- - 3
- type: integer
- x-enum-comments:
- UserTypePrimary: primary account
- UserTypeSub: sub-account
- UserTypeSuper: super administrator
- x-enum-varnames:
- - UserTypeSuper
- - UserTypePrimary
- - UserTypeSub
contextx.Response:
properties:
code:
@@ -21,60 +7,34 @@
msg:
type: string
type: object
- model.User:
+ model.ScheduleTask:
properties:
- companyCity:
+ amount:
+ description: |-
+ Parameter string `json:"parameter"` //product name
+ Customer string `json:"customer"`
+ DeliverDate string `json:"deliverDate"`
+ OrderAttr string `json:"orderAttr"`
+ type: number
+ data:
+ description: schedule task JSON string
type: string
- companyContact:
- type: string
- companyEmail:
- type: string
- companyLogo:
- type: string
- companyName:
- type: string
- companyProvince:
- type: string
- companyTrade:
- type: string
- createAt:
- description: creation time
- type: string
- enable:
- type: boolean
- headerImage:
- type: string
- id:
- type: string
- ip:
- type: string
- menuIds:
- description: menu ID list
- items:
- type: integer
- type: array
- nickName:
- type: string
- parentId:
- type: string
- parentName:
- type: string
- phone:
- type: string
- port:
- type: string
- pos:
- type: string
- status:
+ endTime:
type: integer
- systemName:
+ id:
+ type: integer
+ orderId:
+ description: order ID
type: string
- updateAt:
- description: update time
+ productId:
+ description: product ID
type: string
- userType:
- $ref: '#/definitions/constvar.UserType'
- username:
+ productName:
+ description: product name
+ type: string
+ startTime:
+ type: integer
+ unit:
type: string
type: object
request.TaskInfo:
@@ -95,15 +55,6 @@
description: work order
type: string
type: object
- response.LoginResponse:
- properties:
- expiresAt:
- type: integer
- token:
- type: string
- user:
- $ref: '#/definitions/model.User'
- type: object
info:
contact: {}
paths:
@@ -122,11 +73,34 @@
"200":
description: success
schema:
+ $ref: '#/definitions/contextx.Response'
+ summary: Task start notification
+ tags:
+ - Base
+ /v1/task/list:
+ get:
+ parameters:
+ - description: page number
+ in: query
+ name: page
+ type: integer
+ - description: page size
+ in: query
+ name: pageSize
+ type: integer
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: success
+ schema:
allOf:
- $ref: '#/definitions/contextx.Response'
- properties:
data:
- $ref: '#/definitions/response.LoginResponse'
+ items:
+ $ref: '#/definitions/model.ScheduleTask'
+ type: array
type: object
summary: Task list
tags:
diff --git a/go.mod b/go.mod
index d655aa7..dd75050 100644
--- a/go.mod
+++ b/go.mod
@@ -24,7 +24,7 @@
golang.org/x/sync v0.1.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gorm.io/driver/mysql v1.5.0
- gorm.io/gorm v1.25.0
+ gorm.io/gorm v1.25.3
moul.io/zapgorm2 v1.3.0
)
@@ -51,6 +51,7 @@
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/snappy v0.0.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
@@ -61,13 +62,16 @@
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-sqlite3 v1.14.17 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/nsqio/go-nsq v1.1.0 // indirect
github.com/onsi/gomega v1.27.4 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
+ github.com/shopspring/decimal v1.3.1 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -89,4 +93,5 @@
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
+ gorm.io/driver/sqlite v1.5.2 // indirect
)
diff --git a/go.sum b/go.sum
index 321a1fa..075e4d1 100644
--- a/go.sum
+++ b/go.sum
@@ -70,7 +70,9 @@
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@@ -185,6 +187,8 @@
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -275,6 +279,8 @@
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
+github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -290,6 +296,8 @@
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE=
+github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -336,6 +344,8 @@
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -803,10 +813,14 @@
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
+gorm.io/driver/sqlite v1.5.2 h1:TpQ+/dqCY4uCigCFyrfnrJnrW9zjpelWVoEVNy5qJkc=
+gorm.io/driver/sqlite v1.5.2/go.mod h1:qxAuCol+2r6PannQDpOP1FP6ag3mKi4esLnB/jHed+4=
gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.3 h1:zi4rHZj1anhZS2EuEODMhDisGy+Daq9jtPrNGgbQYD8=
+gorm.io/gorm v1.25.3/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/logs/apsClient.err.log b/logs/apsClient.err.log
index 92313f0..e636088 100644
--- a/logs/apsClient.err.log
+++ b/logs/apsClient.err.log
@@ -16,3 +16,38 @@
[2023-08-15 10:43:02] [error] [apsClient/api/v1.(*NoticeApi).TaskStart.func1:37] TaskStart Notice GetProcessModel error: Post "http://127.0.0.1:9081/api-s/v1/processParams/info": EOF
[2023-08-15 10:48:32] [error] [apsClient/api/v1.(*NoticeApi).TaskStart.func1:37] TaskStart Notice GetProcessModel error: json: cannot unmarshal number into Go struct field GetProcessModelResponse.Code of type string
[2023-08-15 10:49:36] [error] [apsClient/api/v1.(*NoticeApi).TaskStart.func1:37] TaskStart Notice GetProcessModel error: json: cannot unmarshal number into Go struct field GetProcessModelResponse.Code of type string
+[2023-08-15 15:25:28] [error] [apsClient/pkg/safe.RecoverPanic:17] panic: runtime error: invalid memory address or nil pointer dereference, stack trace: goroutine 36 [running]:
+runtime/debug.Stack()
+ C:/Program Files/Go/src/runtime/debug/stack.go:24 +0x65
+apsClient/pkg/safe.RecoverPanic()
+ C:/code/apsClient/pkg/safe/safe.go:17 +0x39
+panic({0x1182d20, 0x203d620})
+ C:/Program Files/Go/src/runtime/panic.go:884 +0x213
+apsClient/nsq.Consume({0xc0005800c0, 0x1a}, {0x1251b90?, 0x1?})
+ C:/code/apsClient/nsq/consumer.go:23 +0x1dc
+apsClient/nsq.Init.func1()
+ C:/code/apsClient/nsq/nsq.go:20 +0x6d
+apsClient/pkg/safe.Go.func1()
+ C:/code/apsClient/pkg/safe/safe.go:11 +0x3f
+created by apsClient/pkg/safe.Go
+ C:/code/apsClient/pkg/safe/safe.go:9 +0x56
+
+[2023-08-15 15:34:30] [error] [apsClient/model.Init:11] failed to initialize database, got error Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 15:34:30] [error] [main.main:23] model Init err:Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 15:35:48] [error] [apsClient/model.Init:11] failed to initialize database, got error Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 15:35:48] [error] [main.main:23] model Init err:Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 15:41:52] [error] [apsClient/model.(*ScheduleTaskSearch).Create:83] trace {"error": "no such table: user_menu", "elapsed": 0.0010028, "rows": 0, "sql": "INSERT INTO `user_menu` (`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id`) VALUES (\"2023-08-15 15:41:52.976\",\"2023-08-15 15:41:52.976\",NULL,1,\"userId\") RETURNING `id`"}
+[2023-08-15 15:41:52] [error] [apsClient/model.(*ScheduleTaskSearch).First:151] trace {"error": "no such table: user_menu", "elapsed": 0.0010025, "rows": 0, "sql": "SELECT * FROM `user_menu` WHERE menu_id = 1 AND `user_menu`.`deleted_at` IS NULL ORDER BY `user_menu`.`id` LIMIT 1"}
+[2023-08-15 18:09:46] [error] [apsClient/model.Init:10] failed to initialize database, got error Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 18:09:46] [error] [main.main:23] model Init err:Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 19:36:01] [error] [apsClient/model.Init:10] failed to initialize database, got error Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 19:36:01] [error] [main.main:23] model Init err:Binary was compiled with 'CGO_ENABLED=0', go-sqlite3 requires cgo to work. This is a stub
+[2023-08-15 19:36:44] [error] [apsClient/nsq.(*ScheduleTask).HandleMessage:61] ScheduleTask HandleMessage task structx.AssignTo taskRecord err: json: cannot unmarshal number into Go struct field Procedures.Procedures.startTime of type string, old: &nsq.DeliverScheduleTask{Order:nsq.Order{OrderID:"0100000", ProductID:"PE500A01D/F", ProductName:"鍓嶇汉姣涘竷", Parameter:"", Customer:"1200", DeliverDate:"2023-08-16", OrderAttr:"浜у搧绫诲埆:娑ょ憾", Amount:decimal.Decimal{value:(*big.Int)(0xc0005202a0), exp:0}, Unit:"", StartTime:1692328320, EndTime:1692946800}, Procedures:[]*nsq.ProductProcedure{(*nsq.ProductProcedure)(0xc0004741e0)}}, new: &model.ScheduleTask{Model:gorm.Model{ID:0x0, CreatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), UpdatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletedAt:gorm.DeletedAt{Time:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Valid:false}}, Id:0, OrderId:"", ProductId:"", ProductName:"", Parameter:"", Customer:"", DeliverDate:"", OrderAttr:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0, Data:"", Procedures:[]*model.Procedures{(*model.Procedures)(0xc0005742a0)}}
+[2023-08-15 19:38:15] [error] [apsClient/nsq.(*ScheduleTask).HandleMessage:61] ScheduleTask HandleMessage task structx.AssignTo taskRecord err: json: cannot unmarshal number into Go struct field Procedures.Procedures.startTime of type string, old: &nsq.DeliverScheduleTask{Order:nsq.Order{OrderID:"0100000", ProductID:"PE500A01D/F", ProductName:"鍓嶇汉姣涘竷", Parameter:"", Customer:"1200", DeliverDate:"2023-08-16", OrderAttr:"浜у搧绫诲埆:娑ょ憾", Amount:decimal.Decimal{value:(*big.Int)(0xc0000464e0), exp:0}, Unit:"", StartTime:1692328320, EndTime:1692946800}, Procedures:[]*nsq.ProductProcedure{(*nsq.ProductProcedure)(0xc00007e000)}}, new: &model.ScheduleTask{Model:gorm.Model{ID:0x0, CreatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), UpdatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletedAt:gorm.DeletedAt{Time:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Valid:false}}, Id:0, OrderId:"", ProductId:"", ProductName:"", Parameter:"", Customer:"", DeliverDate:"", OrderAttr:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0, Data:"", Procedures:[]*model.Procedures{(*model.Procedures)(0xc0000d2000)}}
+[2023-08-15 19:41:15] [error] [apsClient/nsq.(*ScheduleTask).HandleMessage:61] ScheduleTask HandleMessage task structx.AssignTo taskRecord err: json: cannot unmarshal number into Go struct field Procedures.Procedures.startTime of type string, old: &nsq.DeliverScheduleTask{Order:nsq.Order{OrderID:"0100000", ProductID:"PE500A01D/F", ProductName:"鍓嶇汉姣涘竷", Parameter:"", Customer:"1200", DeliverDate:"2023-08-16", OrderAttr:"浜у搧绫诲埆:娑ょ憾", Amount:decimal.Decimal{value:(*big.Int)(0xc00045a040), exp:0}, Unit:"", StartTime:1692328320, EndTime:1692946800}, Procedures:[]*nsq.ProductProcedure{(*nsq.ProductProcedure)(0xc00058c8c0)}}, new: &model.ScheduleTask{Model:gorm.Model{ID:0x0, CreatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), UpdatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletedAt:gorm.DeletedAt{Time:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Valid:false}}, Id:0, OrderId:"", ProductId:"", ProductName:"", Parameter:"", Customer:"", DeliverDate:"", OrderAttr:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0, Data:"", Procedures:[]*model.Procedures{(*model.Procedures)(0xc0004f4000)}}
+[2023-08-15 19:48:02] [error] [apsClient/model.(*ScheduleTaskSearch).Find:199] trace {"error": "Procedures: unsupported relations for schema ScheduleTask", "elapsed": 0, "rows": 0, "sql": "SELECT * FROM `schedule_task` WHERE end_time < 1692100082 AND `schedule_task`.`deleted_at` IS NULL LIMIT 20"}
+[2023-08-15 19:48:37] [error] [apsClient/model.(*ScheduleTaskSearch).Find:199] trace {"error": "Procedures: unsupported relations for schema ScheduleTask", "elapsed": 0.0009641, "rows": 4, "sql": "SELECT * FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL LIMIT 20"}
+[2023-08-15 19:48:44] [error] [apsClient/model.(*ScheduleTaskSearch).Find:199] trace {"error": "Procedures: unsupported relations for schema ScheduleTask", "elapsed": 0, "rows": 4, "sql": "SELECT * FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL LIMIT 20"}
+[2023-08-15 19:53:30] [error] [gorm.io/gorm/migrator.Migrator.AutoMigrate:113] failed to parse value model.ScheduleTask{Model:gorm.Model{ID:0x0, CreatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), UpdatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletedAt:gorm.DeletedAt{Time:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Valid:false}}, Id:0, OrderId:"", ProductId:"", ProductName:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0, Data:"", TaskInfo:request.DeliverScheduleTask{Order:request.Order{OrderID:"", ProductID:"", ProductName:"", Parameter:"", Customer:"", DeliverDate:"", OrderAttr:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0}, Procedures:[]*request.ProductProcedure(nil)}}, got error invalid field found for struct apsClient/model/request.DeliverScheduleTask's field Order: define a valid foreign key for relations or implement the Valuer/Scanner interface
+[2023-08-15 19:53:30] [error] [gorm.io/gorm/migrator.Migrator.CreateTable:207] failed to parse value model.ScheduleTask{Model:gorm.Model{ID:0x0, CreatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), UpdatedAt:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletedAt:gorm.DeletedAt{Time:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Valid:false}}, Id:0, OrderId:"", ProductId:"", ProductName:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0, Data:"", TaskInfo:request.DeliverScheduleTask{Order:request.Order{OrderID:"", ProductID:"", ProductName:"", Parameter:"", Customer:"", DeliverDate:"", OrderAttr:"", Amount:decimal.Decimal{value:(*big.Int)(nil), exp:0}, Unit:"", StartTime:0, EndTime:0}, Procedures:[]*request.ProductProcedure(nil)}}, got error invalid field found for struct apsClient/model/request.DeliverScheduleTask's field Order: define a valid foreign key for relations or implement the Valuer/Scanner interface
+[2023-08-15 19:53:30] [error] [main.main:23] model Init err:invalid field found for struct apsClient/model/request.DeliverScheduleTask's field Order: define a valid foreign key for relations or implement the Valuer/Scanner interface
diff --git a/logs/apsClient.info.log b/logs/apsClient.info.log
index c21f8e3..d69ffb7 100644
--- a/logs/apsClient.info.log
+++ b/logs/apsClient.info.log
@@ -40,3 +40,151 @@
[2023-08-15 10:48:25] [info] [apsClient/pkg/contextx.NewContext.func1:38] 192.168.20.120 | POST /v1/notice/task/start | uid: | &{OrderId:string Product:string Procedure:string WorkOrder:string Device:string}
[2023-08-15 10:48:45] [info] [apsClient/pkg/contextx.NewContext.func1:38] 192.168.20.120 | POST /v1/notice/task/start | uid: | &{OrderId:string Product:string Procedure:string WorkOrder:string Device:string}
[2023-08-15 10:49:54] [info] [main.shutdown:42] apsClient exited...
+[2023-08-15 10:49:57] [info] [main.main:27] apsClient start serve...
+[2023-08-15 10:49:59] [info] [apsClient/pkg/contextx.NewContext.func1:38] 192.168.20.120 | POST /v1/notice/task/start | uid: | &{OrderId:string Product:string Procedure:string WorkOrder:string Device:string}
+[2023-08-15 10:49:59] [info] [apsClient/api/v1.(*NoticeApi).TaskStart.func1:40] TaskStart Notice GetProcessModel: {Number:工艺模型编号 OrderId:订单 Product:产品 Procedure:工序 WorkOrder:工单 Device:设备 ParamsMap:map[压强:100Mpa 时间:100min 温度:1000]}
+[2023-08-15 15:24:23] [info] [main.shutdown:42] apsClient exited...
+[2023-08-15 15:25:28] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:25:28] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.attendance
+[2023-08-15 15:26:08] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 15:26:11] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:26:11] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 15:34:26] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 15:37:47] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:37:47] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 15:39:19] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 15:39:28] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:39:28] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 15:41:47] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 15:41:52] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:41:52] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 15:42:41] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 15:42:46] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 15:42:46] [debug] [gorm.io/gorm/migrator.Migrator.CreateTable.func1:290] trace {"elapsed": 0.0050171, "rows": 0, "sql": "CREATE TABLE `schedule_task` (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`menu_id` bigint(20),`user_id` varchar(255),PRIMARY KEY (`id`))"}
+[2023-08-15 15:42:46] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0050369, "rows": 0, "sql": "CREATE INDEX `idx_schedule_task_user_id` ON `schedule_task`(`user_id`)"}
+[2023-08-15 15:42:46] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0039837, "rows": 0, "sql": "CREATE INDEX `idx_schedule_task_deleted_at` ON `schedule_task`(`deleted_at`)"}
+[2023-08-15 15:42:46] [debug] [apsClient/model.(*ScheduleTaskSearch).Create:83] trace {"elapsed": 0.0040376, "rows": 1, "sql": "INSERT INTO `schedule_task` (`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id`) VALUES (\"2023-08-15 15:42:46.557\",\"2023-08-15 15:42:46.557\",NULL,1,\"userId\") RETURNING `id`"}
+[2023-08-15 15:42:46] [debug] [apsClient/model.(*ScheduleTaskSearch).First:151] trace {"elapsed": 0.0009762, "rows": 1, "sql": "SELECT * FROM `schedule_task` WHERE menu_id = 1 AND `schedule_task`.`deleted_at` IS NULL ORDER BY `schedule_task`.`id` LIMIT 1"}
+[2023-08-15 15:42:46] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:42:46] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 15:44:43] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 15:44:48] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.AlterColumn:80] trace {"elapsed": 0, "rows": 1, "sql": "PRAGMA foreign_keys"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.getRawDDL:370] trace {"elapsed": 0, "rows": -1, "sql": "SELECT sql FROM sqlite_master WHERE type = \"table\" AND tbl_name = \"schedule_task\" AND name = \"schedule_task\""}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:414] trace {"elapsed": 0.0010022, "rows": 0, "sql": "CREATE TABLE `schedule_task__temp` (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`menu_id` bigint(20),`user_id` varchar(255),PRIMARY KEY (`id`))"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:424] trace {"elapsed": 0, "rows": 1, "sql": "INSERT INTO `schedule_task__temp`(`id`,`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id`) SELECT `id`,`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id` FROM `schedule_task`"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:424] trace {"elapsed": 0.0010015, "rows": 1, "sql": "DROP TABLE `schedule_task`"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:424] trace {"elapsed": 0, "rows": 1, "sql": "ALTER TABLE `schedule_task__temp` RENAME TO `schedule_task`"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_user_id\""}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0040193, "rows": 1, "sql": "CREATE INDEX `idx_schedule_task_user_id` ON `schedule_task`(`user_id`)"}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 15:44:48] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0030108, "rows": 1, "sql": "CREATE INDEX `idx_schedule_task_deleted_at` ON `schedule_task`(`deleted_at`)"}
+[2023-08-15 15:44:48] [debug] [apsClient/model.(*ScheduleTaskSearch).Create:83] trace {"elapsed": 0.0050514, "rows": 1, "sql": "INSERT INTO `schedule_task` (`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id`) VALUES (\"2023-08-15 15:44:48.543\",\"2023-08-15 15:44:48.543\",NULL,1,\"userId\") RETURNING `id`"}
+[2023-08-15 15:44:48] [debug] [apsClient/model.(*ScheduleTaskSearch).First:151] trace {"elapsed": 0, "rows": 1, "sql": "SELECT * FROM `schedule_task` WHERE menu_id = 1 AND `schedule_task`.`deleted_at` IS NULL ORDER BY `schedule_task`.`id` LIMIT 1"}
+[2023-08-15 15:44:48] [info] [main.main:33] apsClient start serve...
+[2023-08-15 15:44:48] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 16:08:19] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 16:08:24] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.AlterColumn:80] trace {"elapsed": 0, "rows": 1, "sql": "PRAGMA foreign_keys"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.getRawDDL:370] trace {"elapsed": 0, "rows": -1, "sql": "SELECT sql FROM sqlite_master WHERE type = \"table\" AND tbl_name = \"schedule_task\" AND name = \"schedule_task\""}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:414] trace {"elapsed": 0.0020047, "rows": 0, "sql": "CREATE TABLE `schedule_task__temp` (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`menu_id` bigint(20),`user_id` varchar(255),PRIMARY KEY (`id`))"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:424] trace {"elapsed": 0, "rows": 2, "sql": "INSERT INTO `schedule_task__temp`(`id`,`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id`) SELECT `id`,`created_at`,`updated_at`,`deleted_at`,`menu_id`,`user_id` FROM `schedule_task`"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:424] trace {"elapsed": 0, "rows": 2, "sql": "DROP TABLE `schedule_task`"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.recreateTable.func1.1:424] trace {"elapsed": 0.0010019, "rows": 2, "sql": "ALTER TABLE `schedule_task__temp` RENAME TO `schedule_task`"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0.0010022, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0230621, "rows": 2, "sql": "CREATE INDEX `idx_schedule_task_deleted_at` ON `schedule_task`(`deleted_at`)"}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_user_id\""}
+[2023-08-15 16:08:24] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0050129, "rows": 2, "sql": "CREATE INDEX `idx_schedule_task_user_id` ON `schedule_task`(`user_id`)"}
+[2023-08-15 16:08:24] [info] [main.main:33] apsClient start serve...
+[2023-08-15 16:08:24] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 17:54:43] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0050116, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `order_id` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0040109, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `product_id` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0040373, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `product_name` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0049869, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `parameter` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0040065, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `customer` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0040113, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `deliver_date` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0040103, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `order_attr` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0060163, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `amount` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0050132, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `unit` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.004011, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `start_time` integer"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0040399, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `end_time` integer"}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.AddColumn.func1:363] trace {"elapsed": 0.0049836, "rows": 0, "sql": "ALTER TABLE `schedule_task` ADD `data` text"}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"materials\""}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.CreateTable.func1:290] trace {"elapsed": 0.0040106, "rows": 0, "sql": "CREATE TABLE `materials` (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`material_id` text,`material_name` text,`amount` text,`unit` text,PRIMARY KEY (`id`))"}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0030088, "rows": 0, "sql": "CREATE INDEX `idx_materials_deleted_at` ON `materials`(`deleted_at`)"}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"procedures\""}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.CreateTable.func1:290] trace {"elapsed": 0.005018, "rows": 0, "sql": "CREATE TABLE `procedures` (`id` integer,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`procedure_id` text,`procedure_name` text,`device_id` text,`start_time` text,`end_time` text,`work_hours` text,`input_materials_id` integer,`output_materials_id` integer,PRIMARY KEY (`id`))"}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.CreateIndex.func1:294] trace {"elapsed": 0.0040126, "rows": 0, "sql": "CREATE INDEX `idx_procedures_deleted_at` ON `procedures`(`deleted_at`)"}
+[2023-08-15 19:36:31] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task_procedures\""}
+[2023-08-15 19:36:31] [debug] [gorm.io/gorm/migrator.Migrator.CreateTable.func1:290] trace {"elapsed": 0.0040103, "rows": 0, "sql": "CREATE TABLE `schedule_task_procedures` (`schedule_task_id` integer,`procedures_id` integer,PRIMARY KEY (`schedule_task_id`,`procedures_id`))"}
+[2023-08-15 19:36:31] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:36:31] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:45:18] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:45:24] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0.0010022, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:45:24] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:45:24] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:45:24] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:45:24] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:45:24] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:45:31] [debug] [apsClient/model.(*ScheduleTaskSearch).Create:111] trace {"elapsed": 0.006015, "rows": 1, "sql": "INSERT INTO `schedule_task` (`created_at`,`updated_at`,`deleted_at`,`order_id`,`product_id`,`product_name`,`amount`,`unit`,`start_time`,`end_time`,`data`) VALUES (\"2023-08-15 19:45:31.418\",\"2023-08-15 19:45:31.418\",NULL,\"0100000\",\"PE500A01D/F\",\"鍓嶇汉姣涘竷\",\"1200\",\"\",1692328320,1692946800,\"{\\\"order\\\":{\\\"orderId\\\":\\\"0100000\\\",\\\"productId\\\":\\\"PE500A01D/F\\\",\\\"productName\\\":\\\"鍓嶇汉姣涘竷\\\",\\\"parameter\\\":\\\"\\\",\\\"customer\\\":\\\"1200\\\",\\\"deliverDate\\\":\\\"2023-08-16\\\",\\\"orderAttr\\\":\\\"浜у搧绫诲埆:娑ょ憾\\\",\\\"amount\\\":\\\"1200\\\",\\\"unit\\\":\\\"\\\",\\\"startTime\\\":1692328320,\\\"endTime\\\":1692946800},\\\"procedures\\\":[{\\\"procedureId\\\":\\\"QF\\\",\\\"procedureName\\\":\\\"涓ョ墝鍓嶇汉\\\",\\\"deviceId\\\":\\\"08\\\",\\\"startTime\\\":1692328320,\\\"endTime\\\":1692946800,\\\"workHours\\\":\\\"171.8\\\",\\\"inputMaterials\\\":[{\\\"materialId\\\":\\\"PT\\\",\\\"materialName\\\":\\\"涓ョ墝PT\\\",\\\"amount\\\":\\\"10\\\",\\\"unit\\\":\\\"g/銕\\"}],\\\"outputMaterials\\\":[{\\\"materialId\\\":\\\"PS\\\",\\\"materialName\\\":\\\"涓ョ墝PS\\\",\\\"amount\\\":\\\"10\\\",\\\"unit\\\":\\\"g/銕\\"}],\\\"workers\\\":null}]}\") RETURNING `id`,`id`"}
+[2023-08-15 19:45:45] [debug] [apsClient/model.(*ScheduleTaskSearch).Create:111] trace {"elapsed": 0.0040113, "rows": 1, "sql": "INSERT INTO `schedule_task` (`created_at`,`updated_at`,`deleted_at`,`order_id`,`product_id`,`product_name`,`amount`,`unit`,`start_time`,`end_time`,`data`) VALUES (\"2023-08-15 19:45:45.435\",\"2023-08-15 19:45:45.435\",NULL,\"0100000\",\"PE500A01D/F\",\"鍓嶇汉姣涘竷\",\"1200\",\"\",1692328320,1692946800,\"{\\\"order\\\":{\\\"orderId\\\":\\\"0100000\\\",\\\"productId\\\":\\\"PE500A01D/F\\\",\\\"productName\\\":\\\"鍓嶇汉姣涘竷\\\",\\\"parameter\\\":\\\"\\\",\\\"customer\\\":\\\"1200\\\",\\\"deliverDate\\\":\\\"2023-08-16\\\",\\\"orderAttr\\\":\\\"浜у搧绫诲埆:娑ょ憾\\\",\\\"amount\\\":\\\"1200\\\",\\\"unit\\\":\\\"\\\",\\\"startTime\\\":1692328320,\\\"endTime\\\":1692946800},\\\"procedures\\\":[{\\\"procedureId\\\":\\\"QF\\\",\\\"procedureName\\\":\\\"涓ョ墝鍓嶇汉\\\",\\\"deviceId\\\":\\\"08\\\",\\\"startTime\\\":1692328320,\\\"endTime\\\":1692946800,\\\"workHours\\\":\\\"171.8\\\",\\\"inputMaterials\\\":[{\\\"materialId\\\":\\\"PT\\\",\\\"materialName\\\":\\\"涓ョ墝PT\\\",\\\"amount\\\":\\\"10\\\",\\\"unit\\\":\\\"g/銕\\"}],\\\"outputMaterials\\\":[{\\\"materialId\\\":\\\"PS\\\",\\\"materialName\\\":\\\"涓ョ墝PS\\\",\\\"amount\\\":\\\"10\\\",\\\"unit\\\":\\\"g/銕\\"}],\\\"workers\\\":null}]}\") RETURNING `id`,`id`"}
+[2023-08-15 19:47:49] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:47:55] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:47:55] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0.0010031, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:47:55] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:47:55] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0.0020095, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:47:55] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:47:55] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:48:02] [info] [apsClient/pkg/contextx.NewContext.func1:44] 192.168.20.120 | GET /v1/task/list?page=1&pageSize=20 | uid: | &{PageInfo:{Page:1 PageSize:20}}
+[2023-08-15 19:48:02] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:193] trace {"elapsed": 0, "rows": 1, "sql": "SELECT count(*) FROM `schedule_task` WHERE end_time < 1692100082 AND `schedule_task`.`deleted_at` IS NULL"}
+[2023-08-15 19:48:31] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:48:37] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:48:37] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:48:37] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:48:37] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:48:37] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:48:37] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:48:37] [info] [apsClient/pkg/contextx.NewContext.func1:44] 192.168.20.120 | GET /v1/task/list?page=1&pageSize=20 | uid: | &{PageInfo:{Page:1 PageSize:20}}
+[2023-08-15 19:48:37] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:193] trace {"elapsed": 0, "rows": 1, "sql": "SELECT count(*) FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL"}
+[2023-08-15 19:48:44] [info] [apsClient/pkg/contextx.NewContext.func1:44] 192.168.20.120 | GET /v1/task/list?page=1&pageSize=20 | uid: | &{PageInfo:{Page:1 PageSize:20}}
+[2023-08-15 19:48:44] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:193] trace {"elapsed": 0, "rows": 1, "sql": "SELECT count(*) FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL"}
+[2023-08-15 19:49:21] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:49:27] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:49:27] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:49:27] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:49:27] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:49:27] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:49:27] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:49:30] [info] [apsClient/pkg/contextx.NewContext.func1:44] 192.168.20.120 | GET /v1/task/list?page=1&pageSize=20 | uid: | &{PageInfo:{Page:1 PageSize:20}}
+[2023-08-15 19:49:30] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:193] trace {"elapsed": 0, "rows": 1, "sql": "SELECT count(*) FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL"}
+[2023-08-15 19:49:30] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:199] trace {"elapsed": 0, "rows": 4, "sql": "SELECT * FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL LIMIT 20"}
+[2023-08-15 19:51:56] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:54:05] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:54:05] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:54:05] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:54:05] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:54:05] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:54:05] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:54:10] [info] [apsClient/pkg/contextx.NewContext.func1:44] 192.168.20.120 | GET /v1/task/list?page=1&pageSize=20 | uid: | &{PageInfo:{Page:1 PageSize:20}}
+[2023-08-15 19:54:10] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:195] trace {"elapsed": 0, "rows": 1, "sql": "SELECT count(*) FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL"}
+[2023-08-15 19:54:10] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:201] trace {"elapsed": 0, "rows": 4, "sql": "SELECT * FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL LIMIT 20"}
+[2023-08-15 19:54:24] [info] [main.shutdown:48] apsClient exited...
+[2023-08-15 19:54:47] [debug] [gorm.io/driver/sqlite.Migrator.HasTable.func1:33] trace {"elapsed": 0.0010028, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type='table' AND name=\"schedule_task\""}
+[2023-08-15 19:54:47] [debug] [gorm.io/gorm/migrator.Migrator.RunWithValue:71] trace {"elapsed": 0.0010025, "rows": 3, "sql": "SELECT sql FROM sqlite_master WHERE type IN (\"table\",\"index\") AND tbl_name = \"schedule_task\" AND sql IS NOT NULL order by type = \"table\" desc"}
+[2023-08-15 19:54:47] [debug] [gorm.io/driver/sqlite.Migrator.ColumnTypes.func1:119] trace {"elapsed": 0, "rows": -1, "sql": "SELECT * FROM `schedule_task` LIMIT 1"}
+[2023-08-15 19:54:47] [debug] [gorm.io/driver/sqlite.Migrator.HasIndex.func1:313] trace {"elapsed": 0, "rows": -1, "sql": "SELECT count(*) FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"schedule_task\" AND name = \"idx_schedule_task_deleted_at\""}
+[2023-08-15 19:54:47] [info] [main.main:33] apsClient start serve...
+[2023-08-15 19:54:47] [info] [apsClient/nsq.Consume:17] Consume NewNsqConsumer topic:aps.wangpengfei.scheduleTask
+[2023-08-15 19:54:52] [info] [apsClient/pkg/contextx.NewContext.func1:44] 192.168.20.120 | GET /v1/task/list?page=1&pageSize=20 | uid: | &{PageInfo:{Page:1 PageSize:20}}
+[2023-08-15 19:54:52] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:195] trace {"elapsed": 0.0010002, "rows": 1, "sql": "SELECT count(*) FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL"}
+[2023-08-15 19:54:52] [debug] [apsClient/model.(*ScheduleTaskSearch).Find:201] trace {"elapsed": 0.0009973, "rows": 4, "sql": "SELECT * FROM `schedule_task` WHERE `schedule_task`.`deleted_at` IS NULL LIMIT 20"}
+[2023-08-15 19:55:57] [info] [main.shutdown:48] apsClient exited...
diff --git a/main.go b/main.go
index fbf0511..54c673f 100644
--- a/main.go
+++ b/main.go
@@ -3,6 +3,7 @@
import (
"apsClient/conf"
"apsClient/model"
+ "apsClient/nsq"
"apsClient/pkg/logx"
"apsClient/router"
"fmt"
@@ -23,6 +24,11 @@
return
}
+ if err := nsq.Init(); err != nil {
+ logx.Errorf("nsq Init err:%v", err)
+ return
+ }
+
go shutdown()
logx.Infof("apsClient start serve...")
server := &http.Server{
diff --git a/model/cluster.go b/model/cluster.go
deleted file mode 100644
index 743b1de..0000000
--- a/model/cluster.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package model
-
-import (
- "apsClient/pkg/mysqlx"
- "fmt"
- "gorm.io/gorm"
-)
-
-type (
- Cluster struct {
- ID int `json:"id" gorm:"primaryKey;type:bigint(20);comment:闆嗙兢ID"`
- Name string `json:"name" gorm:"index;type:varchar(255);comment:闆嗙兢鍚嶇О"`
- Description string `json:"description" gorm:"index;type:varchar(255);comment:闆嗙兢鎻忚堪"`
- Devices []Node `json:"nodes" gorm:"hasMany:device"`
- }
-
- Node struct {
- Device
- Role []string `json:"role" gorm:"type:varchar(255);comment:鑺傜偣瑙掕壊"`
- }
-
- ClusterSearch struct {
- Cluster
- Order string
- PageNum int
- PageSize int
- Orm *gorm.DB
- }
-)
-
-func (slf Cluster) TableName() string {
- return "cluster"
-}
-
-func NewClusterSearch(db *gorm.DB) *ClusterSearch {
- if db == nil {
- db = mysqlx.GetDB()
- }
-
- return &ClusterSearch{Orm: db}
-}
-
-func (slf *ClusterSearch) SetOrm(tx *gorm.DB) *ClusterSearch {
- slf.Orm = tx
- return slf
-}
-
-func (slf *ClusterSearch) SetId(id int) *ClusterSearch {
- slf.ID = id
- return slf
-}
-
-func (slf *ClusterSearch) SetName(name string) *ClusterSearch {
- slf.Name = name
- return slf
-}
-
-func (slf *ClusterSearch) build() *gorm.DB {
- var db = slf.Orm.Model(&Cluster{}).Preload("Device")
-
- if slf.ID > 0 {
- db = db.Where("id = ?", slf.ID)
- }
-
- if slf.Name != "" {
- db = db.Where("name = ?", slf.Name)
- }
-
- if slf.Description != "" {
- db = db.Where("description = ?", slf.Description)
- }
-
- return db
-}
-
-func (slf *ClusterSearch) Create(record *Cluster) error {
- var db = slf.build()
-
- if err := db.Create(record).Error; err != nil {
- return fmt.Errorf("create err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-func (slf *ClusterSearch) First() (*Cluster, error) {
- var (
- record = new(Cluster)
- db = slf.build()
- )
-
- if err := db.First(record).Error; err != nil {
- return record, err
- }
-
- return record, nil
-}
diff --git a/model/device.go b/model/device.go
deleted file mode 100644
index 9ac1006..0000000
--- a/model/device.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package model
-
-import (
- "apsClient/pkg/mysqlx"
- "fmt"
- "gorm.io/gorm"
-)
-
-type (
- Device struct {
- ID int `json:"id" gorm:"primaryKey;type:bigint(20);comment:璁惧ID"`
- Name string `json:"name" gorm:"index;type:varchar(255);comment:璁惧鍚嶇О"`
- IP string `json:"IP" gorm:"index;type:varchar(255);comment:璁惧IP"`
- Account string `json:"account" gorm:"index;type:varchar(255);comment:root璐﹀彿"`
- Password string `json:"password" gorm:"index;type:varchar(255);comment:root瀵嗙爜"`
- Port string `json:"port" gorm:"index;type:varchar(255);comment:绔彛鍙�"`
- }
-
- DeviceSearch struct {
- Device
- Order string
- PageNum int
- PageSize int
- Orm *gorm.DB
- }
-)
-
-func (slf Device) TableName() string {
- return "device"
-}
-
-func NewDeviceSearch(db *gorm.DB) *DeviceSearch {
- if db == nil {
- db = mysqlx.GetDB()
- }
- return &DeviceSearch{Orm: db}
-}
-
-func (slf *DeviceSearch) SetDeviceName(name string) *DeviceSearch {
- slf.Name = name
- return slf
-}
-
-func (slf *DeviceSearch) SetDeviceIp(ip string) *DeviceSearch {
- slf.IP = ip
- return slf
-}
-
-func (slf *DeviceSearch) First() (*Device, error) {
- var (
- record = new(Device)
- db = slf.build()
- )
-
- if err := db.First(record).Error; err != nil {
- return record, err
- }
-
- return record, nil
-}
-
-func (slf *DeviceSearch) SetPage(page, size int) *DeviceSearch {
- slf.PageNum, slf.PageSize = page, size
- return slf
-}
-
-func (slf *DeviceSearch) SetOrder(order string) *DeviceSearch {
- slf.Order = order
- return slf
-}
-
-func (slf *DeviceSearch) Find() ([]*Device, int64, error) {
- var (
- records = make([]*Device, 0)
- total int64
- db = slf.build()
- )
-
- if err := db.Count(&total).Error; err != nil {
- return records, total, fmt.Errorf("find count err: %v", err)
- }
-
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
-
- if err := db.Find(&records).Error; err != nil {
- return records, total, fmt.Errorf("find records err: %v", err)
- }
-
- return records, total, nil
-}
-
-func (slf *DeviceSearch) build() *gorm.DB {
- var db = slf.Orm.Model(&Device{})
-
- if slf.ID > 0 {
- db = db.Where("id = ?", slf.ID)
- }
-
- if slf.IP != "" {
- db = db.Where("ip = ?", slf.IP)
- }
-
- if slf.Account != "" {
- db = db.Where("account = ?", slf.Account)
- }
-
- if slf.Password != "" {
- db = db.Where("password = ?", slf.Password)
- }
-
- if slf.Port != "" {
- db = db.Where("port = ?", slf.Port)
- }
-
- return db
-}
-
-func (slf *DeviceSearch) Create(record *Device) error {
- var db = slf.build()
-
- if err := db.Create(record).Error; err != nil {
- return fmt.Errorf("create err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-func (slf *DeviceSearch) SetId(id int) *DeviceSearch {
- slf.ID = id
- return slf
-}
-
-func (slf *DeviceSearch) SetIds(ids []int) *DeviceSearch {
- slf.Orm = slf.Orm.Where("id in (?)", ids)
- return slf
-}
diff --git a/model/index.go b/model/index.go
index ee5a72f..fb11459 100644
--- a/model/index.go
+++ b/model/index.go
@@ -3,23 +3,22 @@
import (
"apsClient/conf"
"apsClient/pkg/logx"
- "apsClient/pkg/mysqlx"
+ "apsClient/pkg/sqlitex"
)
func Init() error {
- if err := mysqlx.Init(&conf.Conf.Mysql, logx.GetLogger()); err != nil {
+ if err := sqlitex.Init(&conf.Conf.Sqlite, logx.GetLogger()); err != nil {
return err
}
if err := RegisterTables(); err != nil {
return err
}
-
return nil
}
func RegisterTables() error {
- db := mysqlx.GetDB()
- err := db.AutoMigrate()
+ db := sqlitex.GetDB()
+ err := db.AutoMigrate(ScheduleTask{})
return err
}
diff --git a/model/jwt_blacklist.go b/model/jwt_blacklist.go
deleted file mode 100644
index fe53aee..0000000
--- a/model/jwt_blacklist.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package model
-
-import (
- "apsClient/pkg/mysqlx"
- "fmt"
- "gorm.io/gorm"
-)
-
-type (
- JwtBlacklist struct {
- ID uint `gorm:"type:bigint(20);primaryKey"` // 涓婚敭ID
- Jwt string `gorm:"type:text;comment:jwt"`
- CreateTime int64 `json:"createTime" gorm:"type:bigint(20);comment:鍒涘缓鏃堕棿"`
- }
-
- JwtBlacklistSearch struct {
- JwtBlacklist
- Order string
- PageNum int
- PageSize int
- Orm *gorm.DB
- }
-)
-
-func (slf *JwtBlacklist) TableName() string {
- return "jwt_black_list"
-}
-
-func NewJwtBlacklistSearch(db *gorm.DB) *JwtBlacklistSearch {
- if db == nil {
- db = mysqlx.GetDB()
- }
- return &JwtBlacklistSearch{Orm: db}
-}
-
-func (slf *JwtBlacklistSearch) SetOrm(tx *gorm.DB) *JwtBlacklistSearch {
- slf.Orm = tx
- return slf
-}
-
-func (slf *JwtBlacklistSearch) SetPage(page, size int) *JwtBlacklistSearch {
- slf.PageNum, slf.PageSize = page, size
- return slf
-}
-
-func (slf *JwtBlacklistSearch) SetOrder(order string) *JwtBlacklistSearch {
- slf.Order = order
- return slf
-}
-
-func (slf *JwtBlacklistSearch) SetId(id uint) *JwtBlacklistSearch {
- slf.ID = id
- return slf
-}
-
-func (slf *JwtBlacklistSearch) SetJWT(jwt string) *JwtBlacklistSearch {
- slf.Jwt = jwt
- return slf
-}
-
-func (slf *JwtBlacklistSearch) build() *gorm.DB {
- var db = slf.Orm.Model(&JwtBlacklist{})
-
- if slf.ID > 0 {
- db = db.Where("id = ?", slf.ID)
- }
-
- if slf.Jwt != "" {
- db = db.Where("jwt = ?", slf.Jwt)
- }
-
- if slf.Order != "" {
- db = db.Order(slf.Order)
- }
-
- return db
-}
-
-// Create 鍗曟潯鎻掑叆
-func (slf *JwtBlacklistSearch) Create(record *JwtBlacklist) error {
- var db = slf.build()
-
- if err := db.Create(record).Error; err != nil {
- return fmt.Errorf("create err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-// CreateBatch 鎵归噺鎻掑叆
-func (slf *JwtBlacklistSearch) CreateBatch(records []JwtBlacklist) error {
- var db = slf.build()
-
- if err := db.Create(&records).Error; err != nil {
- return fmt.Errorf("create batch err: %v, records: %+v", err, records)
- }
-
- return nil
-}
-
-func (slf *JwtBlacklistSearch) Save(record *JwtBlacklist) error {
- var db = slf.build()
-
- if err := db.Save(record).Error; err != nil {
- return fmt.Errorf("save err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-func (slf *JwtBlacklistSearch) UpdateByMap(upMap map[string]interface{}) error {
- var (
- db = slf.build()
- )
-
- if err := db.Updates(upMap).Error; err != nil {
- return fmt.Errorf("update by map err: %v, upMap: %+v", err, upMap)
- }
-
- return nil
-}
-
-func (slf *JwtBlacklistSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
- var (
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if err := db.Updates(upMap).Error; err != nil {
- return fmt.Errorf("update by query err: %v, query: %s, args: %+v, upMap: %+v", err, query, args, upMap)
- }
-
- return nil
-}
-
-func (slf *JwtBlacklistSearch) Delete() error {
- var db = slf.build()
-
- if err := db.Unscoped().Delete(&JwtBlacklist{}).Error; err != nil {
- return err
- }
-
- return nil
-}
-
-func (slf *JwtBlacklistSearch) First() (*JwtBlacklist, error) {
- var (
- record = new(JwtBlacklist)
- db = slf.build()
- )
-
- if err := db.First(record).Error; err != nil {
- return record, err
- }
-
- return record, nil
-}
-
-func (slf *JwtBlacklistSearch) Find() ([]JwtBlacklist, int64, error) {
- var (
- records = make([]JwtBlacklist, 0)
- total int64
- db = slf.build()
- )
-
- if err := db.Count(&total).Error; err != nil {
- return records, total, fmt.Errorf("find count err: %v", err)
- }
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, total, fmt.Errorf("find records err: %v", err)
- }
-
- return records, total, nil
-}
-
-func (slf *JwtBlacklistSearch) FindNotTotal() ([]JwtBlacklist, error) {
- var (
- records = make([]JwtBlacklist, 0)
- db = slf.build()
- )
-
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, fmt.Errorf("find records err: %v", err)
- }
-
- return records, nil
-}
-
-// FindByQuery 鎸囧畾鏉′欢鏌ヨ.
-func (slf *JwtBlacklistSearch) FindByQuery(query string, args []interface{}) ([]JwtBlacklist, int64, error) {
- var (
- records = make([]JwtBlacklist, 0)
- total int64
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if err := db.Count(&total).Error; err != nil {
- return records, total, fmt.Errorf("find by query count err: %v", err)
- }
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, total, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
- }
-
- return records, total, nil
-}
-
-// FindByQueryNotTotal 鎸囧畾鏉′欢鏌ヨ&涓嶆煡璇㈡�绘潯鏁�.
-func (slf *JwtBlacklistSearch) FindByQueryNotTotal(query string, args []interface{}) ([]JwtBlacklist, error) {
- var (
- records = make([]JwtBlacklist, 0)
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
- }
-
- return records, nil
-}
diff --git a/model/user_menu.go b/model/materials.go
similarity index 60%
rename from model/user_menu.go
rename to model/materials.go
index 4e53a25..8fd66e5 100644
--- a/model/user_menu.go
+++ b/model/materials.go
@@ -1,19 +1,29 @@
package model
import (
- "apsClient/pkg/mysqlx"
+ "apsClient/pkg/sqlitex"
"fmt"
"gorm.io/gorm"
)
type (
- UserMenu struct {
- MenuId uint `json:"menuId" gorm:"type:bigint(20);comment:鑿滃崟ID"`
- UserId string `json:"userId" gorm:"index;type:varchar(255);comment:鐢ㄦ埛ID"`
+
+ //"materialId": "PS",
+	//"materialName": "严牌PS",
+ //"amount": "10",
+	//"unit": "g/㎡"
+
+ Materials struct {
+ gorm.Model `json:"-"`
+ Id int `json:"id"`
+ MaterialId string `json:"materialId"`
+ MaterialName string `json:"materialName"`
+ Amount string `json:"amount"`
+ Unit string `json:"unit"`
}
- UserMenuSearch struct {
- UserMenu
+ MaterialsSearch struct {
+ Materials
Order string
PageNum int
PageSize int
@@ -21,52 +31,34 @@
}
)
-func (slf *UserMenu) TableName() string {
- return "user_menu"
+func (slf *Materials) TableName() string {
+ return "materials"
}
-func NewUserMenuSearch(db *gorm.DB) *UserMenuSearch {
+func NewMaterialsSearch(db *gorm.DB) *MaterialsSearch {
if db == nil {
- db = mysqlx.GetDB()
+ db = sqlitex.GetDB()
}
- return &UserMenuSearch{Orm: db}
+ return &MaterialsSearch{Orm: db}
}
-func (slf *UserMenuSearch) SetOrm(tx *gorm.DB) *UserMenuSearch {
+func (slf *MaterialsSearch) SetOrm(tx *gorm.DB) *MaterialsSearch {
slf.Orm = tx
return slf
}
-func (slf *UserMenuSearch) SetPage(page, size int) *UserMenuSearch {
+func (slf *MaterialsSearch) SetPage(page, size int) *MaterialsSearch {
slf.PageNum, slf.PageSize = page, size
return slf
}
-func (slf *UserMenuSearch) SetOrder(order string) *UserMenuSearch {
+func (slf *MaterialsSearch) SetOrder(order string) *MaterialsSearch {
slf.Order = order
return slf
}
-func (slf *UserMenuSearch) SetUserId(userId string) *UserMenuSearch {
- slf.UserId = userId
- return slf
-}
-
-func (slf *UserMenuSearch) SetMenuId(menuId uint) *UserMenuSearch {
- slf.MenuId = menuId
- return slf
-}
-
-func (slf *UserMenuSearch) build() *gorm.DB {
- var db = slf.Orm.Model(&UserMenu{})
-
- if slf.MenuId > 0 {
- db = db.Where("menu_id = ?", slf.MenuId)
- }
-
- if slf.UserId != "" {
- db = db.Where("user_id = ?", slf.UserId)
- }
+func (slf *MaterialsSearch) build() *gorm.DB {
+ var db = slf.Orm.Model(&Materials{})
if slf.Order != "" {
db = db.Order(slf.Order)
@@ -76,7 +68,7 @@
}
 // Create inserts a single record
-func (slf *UserMenuSearch) Create(record *UserMenu) error {
+func (slf *MaterialsSearch) Create(record *Materials) error {
var db = slf.build()
if err := db.Create(record).Error; err != nil {
@@ -87,7 +79,7 @@
}
 // CreateBatch inserts records in batch
-func (slf *UserMenuSearch) CreateBatch(records []*UserMenu) error {
+func (slf *MaterialsSearch) CreateBatch(records []*Materials) error {
var db = slf.build()
if err := db.Create(&records).Error; err != nil {
@@ -97,7 +89,7 @@
return nil
}
-func (slf *UserMenuSearch) Save(record *UserMenu) error {
+func (slf *MaterialsSearch) Save(record *Materials) error {
var db = slf.build()
if err := db.Save(record).Error; err != nil {
@@ -107,7 +99,7 @@
return nil
}
-func (slf *UserMenuSearch) UpdateByMap(upMap map[string]interface{}) error {
+func (slf *MaterialsSearch) UpdateByMap(upMap map[string]interface{}) error {
var (
db = slf.build()
)
@@ -119,7 +111,7 @@
return nil
}
-func (slf *UserMenuSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
+func (slf *MaterialsSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
var (
db = slf.Orm.Table(slf.TableName()).Where(query, args...)
)
@@ -131,19 +123,19 @@
return nil
}
-func (slf *UserMenuSearch) Delete() error {
+func (slf *MaterialsSearch) Delete() error {
var db = slf.build()
- if err := db.Unscoped().Delete(&UserMenu{}).Error; err != nil {
+ if err := db.Unscoped().Delete(&Materials{}).Error; err != nil {
return err
}
return nil
}
-func (slf *UserMenuSearch) First() (*UserMenu, error) {
+func (slf *MaterialsSearch) First() (*Materials, error) {
var (
- record = new(UserMenu)
+ record = new(Materials)
db = slf.build()
)
@@ -154,9 +146,9 @@
return record, nil
}
-func (slf *UserMenuSearch) Find() ([]*UserMenu, int64, error) {
+func (slf *MaterialsSearch) Find() ([]*Materials, int64, error) {
var (
- records = make([]*UserMenu, 0)
+ records = make([]*Materials, 0)
total int64
db = slf.build()
)
@@ -174,9 +166,9 @@
return records, total, nil
}
-func (slf *UserMenuSearch) FindNotTotal() ([]*UserMenu, error) {
+func (slf *MaterialsSearch) FindNotTotal() ([]*Materials, error) {
var (
- records = make([]*UserMenu, 0)
+ records = make([]*Materials, 0)
db = slf.build()
)
@@ -191,9 +183,9 @@
}
 // FindByQuery queries records by a custom condition.
-func (slf *UserMenuSearch) FindByQuery(query string, args []interface{}) ([]*UserMenu, int64, error) {
+func (slf *MaterialsSearch) FindByQuery(query string, args []interface{}) ([]*Materials, int64, error) {
var (
- records = make([]*UserMenu, 0)
+ records = make([]*Materials, 0)
total int64
db = slf.Orm.Table(slf.TableName()).Where(query, args...)
)
@@ -212,9 +204,9 @@
}
 // FindByQueryNotTotal queries records by a custom condition without counting the total.
-func (slf *UserMenuSearch) FindByQueryNotTotal(query string, args []interface{}) ([]*UserMenu, error) {
+func (slf *MaterialsSearch) FindByQueryNotTotal(query string, args []interface{}) ([]*Materials, error) {
var (
- records = make([]*UserMenu, 0)
+ records = make([]*Materials, 0)
db = slf.Orm.Table(slf.TableName()).Where(query, args...)
)
diff --git a/model/menu.go b/model/menu.go
deleted file mode 100644
index 5810f37..0000000
--- a/model/menu.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package model
-
-import (
- "apsClient/pkg/mysqlx"
- "fmt"
- "gorm.io/gorm"
- "time"
-)
-
-type (
- Menu struct {
- ID uint `json:"id" gorm:"type:bigint(20);primaryKey"` // 涓婚敭ID
- ParentId uint `json:"parentId" gorm:"index;type:bigint(20);comment:鐖惰彍鍗旾D"`
- Path string `json:"path" gorm:"type:varchar(255);comment:璺敱path"`
- Name string `json:"name" gorm:"type:varchar(255);comment:name"`
- Title string `json:"title" gorm:"type:varchar(255);comment:鏍囬"`
- Sort int `json:"sort" gorm:"type:int(11);comment:鎺掑簭鏍囪"`
- Icon string `json:"icon" gorm:"type:varchar(512);comment:鑿滃崟鍥炬爣"`
- Hidden bool `json:"hidden" gorm:"type:tinyint(1);comment:鏄惁闅愯棌"`
- Type int `json:"type" gorm:"type:int(11);comment:绫诲瀷 0-鐩綍 1-鑿滃崟 2-鎸夐挳"`
- CreateTime int64 `json:"-" gorm:"type:bigint(20);comment:鍒涘缓鏃堕棿"`
- UpdateTime int64 `json:"-" gorm:"type:bigint(20);comment:鏇存柊鏃堕棿"`
- Children []*Menu `json:"children" gorm:"-"`
- CreateAt string `json:"createAt" gorm:"-"` // 鍒涘缓鏃堕棿
- UpdateAt string `json:"updateAt" gorm:"-"` // 鏇存柊鏃堕棿
- }
-
- MenuSearch struct {
- Menu
- Ids []uint
- Order string
- PageNum int
- PageSize int
- Orm *gorm.DB
- }
-)
-
-func (slf Menu) TableName() string {
- return "menu"
-}
-
-func (slf *Menu) BeforeCreate(tx *gorm.DB) error {
- slf.CreateTime = time.Now().Unix()
- slf.UpdateTime = slf.CreateTime
- return nil
-}
-
-func (slf *Menu) BeforeSave(tx *gorm.DB) error {
- slf.UpdateTime = time.Now().Unix()
- return nil
-}
-
-func (slf *Menu) BeforeUpdate(tx *gorm.DB) error {
- slf.UpdateTime = time.Now().Unix()
- return nil
-}
-
-func (slf *Menu) AfterFind(tx *gorm.DB) error {
- slf.CreateAt = time.Unix(slf.CreateTime, 0).Format("2006-01-02 15:04:05")
- slf.UpdateAt = time.Unix(slf.UpdateTime, 0).Format("2006-01-02 15:04:05")
- return nil
-}
-
-func NewMenuSearch(db *gorm.DB) *MenuSearch {
- if db == nil {
- db = mysqlx.GetDB()
- }
- return &MenuSearch{Orm: db}
-}
-
-func (slf *MenuSearch) SetOrm(tx *gorm.DB) *MenuSearch {
- slf.Orm = tx
- return slf
-}
-
-func (slf *MenuSearch) SetPage(page, size int) *MenuSearch {
- slf.PageNum, slf.PageSize = page, size
- return slf
-}
-
-func (slf *MenuSearch) SetOrder(order string) *MenuSearch {
- slf.Order = order
- return slf
-}
-
-func (slf *MenuSearch) SetId(id uint) *MenuSearch {
- slf.ID = id
- return slf
-}
-
-func (slf *MenuSearch) SetParentId(parentId uint) *MenuSearch {
- slf.ParentId = parentId
- return slf
-}
-
-func (slf *MenuSearch) SetIds(ids []uint) *MenuSearch {
- slf.Ids = ids
- return slf
-}
-
-func (slf *MenuSearch) SetName(name string) *MenuSearch {
- slf.Name = name
- return slf
-}
-
-func (slf *MenuSearch) build() *gorm.DB {
- var db = slf.Orm.Model(&Menu{})
-
- if slf.ID > 0 {
- db = db.Where("id = ?", slf.ID)
- }
-
- if slf.ParentId > 0 {
- db = db.Where("parent_id = ?", slf.ParentId)
- }
-
- if len(slf.Ids) > 0 {
- db = db.Where("id in (?)", slf.Ids)
- }
-
- if slf.Order != "" {
- db = db.Order(slf.Order)
- }
-
- return db
-}
-
-// Create 鍗曟潯鎻掑叆
-func (slf *MenuSearch) Create(record *Menu) error {
- var db = slf.build()
-
- if err := db.Create(record).Error; err != nil {
- return fmt.Errorf("create err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-// CreateBatch 鎵归噺鎻掑叆
-func (slf *MenuSearch) CreateBatch(records []*Menu) error {
- var db = slf.build()
-
- if err := db.Create(&records).Error; err != nil {
- return fmt.Errorf("create batch err: %v, records: %+v", err, records)
- }
-
- return nil
-}
-
-func (slf *MenuSearch) Save(record *Menu) error {
- var db = slf.build()
-
- if err := db.Save(record).Error; err != nil {
- return fmt.Errorf("save err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-func (slf *MenuSearch) UpdateByMap(upMap map[string]interface{}) error {
- var (
- db = slf.build()
- )
-
- if err := db.Updates(upMap).Error; err != nil {
- return fmt.Errorf("update by map err: %v, upMap: %+v", err, upMap)
- }
-
- return nil
-}
-
-func (slf *MenuSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
- var (
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if err := db.Updates(upMap).Error; err != nil {
- return fmt.Errorf("update by query err: %v, query: %s, args: %+v, upMap: %+v", err, query, args, upMap)
- }
-
- return nil
-}
-
-func (slf *MenuSearch) Delete() error {
- var db = slf.build().Where("1=1")
-
- if err := db.Unscoped().Delete(&Menu{}).Error; err != nil {
- return err
- }
-
- return nil
-}
-
-func (slf *MenuSearch) First() (*Menu, error) {
- var (
- record = new(Menu)
- db = slf.build()
- )
-
- if err := db.First(record).Error; err != nil {
- return record, err
- }
-
- return record, nil
-}
-
-func (slf *MenuSearch) Find() ([]*Menu, int64, error) {
- var (
- records = make([]*Menu, 0)
- total int64
- db = slf.build()
- )
-
- if err := db.Count(&total).Error; err != nil {
- return records, total, fmt.Errorf("find count err: %v", err)
- }
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, total, fmt.Errorf("find records err: %v", err)
- }
-
- return records, total, nil
-}
-
-func (slf *MenuSearch) FindNotTotal() ([]*Menu, error) {
- var (
- records = make([]*Menu, 0)
- db = slf.build()
- )
-
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, fmt.Errorf("find records err: %v", err)
- }
-
- return records, nil
-}
-
-// FindByQuery 鎸囧畾鏉′欢鏌ヨ.
-func (slf *MenuSearch) FindByQuery(query string, args []interface{}) ([]*Menu, int64, error) {
- var (
- records = make([]*Menu, 0)
- total int64
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if err := db.Count(&total).Error; err != nil {
- return records, total, fmt.Errorf("find by query count err: %v", err)
- }
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, total, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
- }
-
- return records, total, nil
-}
-
-// FindByQueryNotTotal 鎸囧畾鏉′欢鏌ヨ&涓嶆煡璇㈡�绘潯鏁�.
-func (slf *MenuSearch) FindByQueryNotTotal(query string, args []interface{}) ([]*Menu, error) {
- var (
- records = make([]*Menu, 0)
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Find(&records).Error; err != nil {
- return records, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
- }
-
- return records, nil
-}
diff --git a/model/mysql.go b/model/mysql.go
deleted file mode 100644
index ced9c34..0000000
--- a/model/mysql.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package model
-
-import (
- "apsClient/conf"
- "apsClient/pkg/logx"
- "apsClient/pkg/mysqlx"
- "fmt"
- "gorm.io/gorm"
-)
-
-type Mysql struct{}
-
-func NewMysql() *Mysql {
- return &Mysql{}
-}
-
-func (slf *Mysql) CreateUser(userName string, password string, database string) error {
- db := mysqlx.GetDB()
- err := db.Transaction(func(tx *gorm.DB) error {
- userSql := fmt.Sprintf("create user if not exists %v@'%v' identified by '%v'", userName, conf.Conf.Mysql.Host, password)
- privilegeSql := fmt.Sprintf("grant all privileges on %v.* to %v@'%v'", database, userName, conf.Conf.Mysql.Host)
- flushSql := "flush privileges"
-
- err := tx.Exec(userSql).Error
- if err != nil {
- logx.Errorf("create mysql-user err:%v", err)
- return err
- }
-
- err = tx.Exec(privilegeSql).Error
- if err != nil {
- logx.Errorf("grant mysql-privileges err:%v", err)
- return err
- }
-
- err = tx.Exec(flushSql).Error
- if err != nil {
- logx.Errorf("flush mysql-privilege err:%v", err)
- return err
- }
- return nil
- })
- return err
-}
-
-func (slf *Mysql) CreateDatabase(database string) error {
- db := mysqlx.GetDB()
- //sql := fmt.Sprintf("create database if not exists %v default charset utf8mb4 collate utf8mb4_general_ci", database)
- sql := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s` DEFAULT CHARACTER SET utf8mb4 DEFAULT COLLATE utf8mb4_general_ci;", database)
- err := db.Exec(sql).Error
- if err != nil {
- logx.Errorf("create database err:%v", err)
- return err
- }
- return nil
-}
diff --git a/model/procedures.go b/model/procedures.go
new file mode 100644
index 0000000..1dfc611
--- /dev/null
+++ b/model/procedures.go
@@ -0,0 +1,230 @@
+package model
+
+import (
+ "apsClient/pkg/sqlitex"
+ "fmt"
+ "gorm.io/gorm"
+)
+
+type (
+ Procedures struct {
+ gorm.Model `json:"-"`
+ Id int `json:"id"`
+		ProcedureId   string `json:"procedureId"`
+		ProcedureName string `json:"procedureName"`
+ DeviceId string `json:"deviceId"`
+ StartTime string `json:"startTime"`
+ EndTime string `json:"endTime"`
+ WorkHours string `json:"workHours"`
+ InputMaterialsId int `json:"-"`
+ InputMaterials *Materials `json:"inputMaterials" gorm:"foreignKey:InputMaterialsId"`
+ OutputMaterialsId int `json:"-"`
+		OutputMaterials *Materials `json:"outputMaterials" gorm:"foreignKey:OutputMaterialsId"`
+ }
+
+ ProceduresSearch struct {
+ Procedures
+ Order string
+ PageNum int
+ PageSize int
+ Orm *gorm.DB
+ Preload bool
+ }
+)
+
+func (slf *Procedures) TableName() string {
+ return "procedures"
+}
+
+func NewProceduresSearch(db *gorm.DB) *ProceduresSearch {
+ if db == nil {
+ db = sqlitex.GetDB()
+ }
+ return &ProceduresSearch{Orm: db}
+}
+
+func (slf *ProceduresSearch) SetOrm(tx *gorm.DB) *ProceduresSearch {
+ slf.Orm = tx
+ return slf
+}
+
+func (slf *ProceduresSearch) SetPage(page, size int) *ProceduresSearch {
+ slf.PageNum, slf.PageSize = page, size
+ return slf
+}
+
+func (slf *ProceduresSearch) SetOrder(order string) *ProceduresSearch {
+ slf.Order = order
+ return slf
+}
+
+func (slf *ProceduresSearch) SetPreload(preload bool) *ProceduresSearch {
+ slf.Preload = preload
+ return slf
+}
+
+func (slf *ProceduresSearch) build() *gorm.DB {
+ var db = slf.Orm.Model(&Procedures{})
+
+ if slf.Order != "" {
+ db = db.Order(slf.Order)
+ }
+ if slf.Preload {
+ db = db.Preload("InputMaterials").Preload("OutputMaterials")
+ }
+
+ return db
+}
+
+// Create inserts a single record
+func (slf *ProceduresSearch) Create(record *Procedures) error {
+ var db = slf.build()
+
+ if err := db.Create(record).Error; err != nil {
+ return fmt.Errorf("create err: %v, record: %+v", err, record)
+ }
+
+ return nil
+}
+
+// CreateBatch inserts records in batch
+func (slf *ProceduresSearch) CreateBatch(records []*Procedures) error {
+ var db = slf.build()
+
+ if err := db.Create(&records).Error; err != nil {
+ return fmt.Errorf("create batch err: %v, records: %+v", err, records)
+ }
+
+ return nil
+}
+
+func (slf *ProceduresSearch) Save(record *Procedures) error {
+ var db = slf.build()
+
+ if err := db.Save(record).Error; err != nil {
+ return fmt.Errorf("save err: %v, record: %+v", err, record)
+ }
+
+ return nil
+}
+
+func (slf *ProceduresSearch) UpdateByMap(upMap map[string]interface{}) error {
+ var (
+ db = slf.build()
+ )
+
+ if err := db.Updates(upMap).Error; err != nil {
+ return fmt.Errorf("update by map err: %v, upMap: %+v", err, upMap)
+ }
+
+ return nil
+}
+
+func (slf *ProceduresSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
+ var (
+ db = slf.Orm.Table(slf.TableName()).Where(query, args...)
+ )
+
+ if err := db.Updates(upMap).Error; err != nil {
+ return fmt.Errorf("update by query err: %v, query: %s, args: %+v, upMap: %+v", err, query, args, upMap)
+ }
+
+ return nil
+}
+
+func (slf *ProceduresSearch) Delete() error {
+ var db = slf.build()
+
+ if err := db.Unscoped().Delete(&Procedures{}).Error; err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (slf *ProceduresSearch) First() (*Procedures, error) {
+ var (
+ record = new(Procedures)
+ db = slf.build()
+ )
+
+ if err := db.First(record).Error; err != nil {
+ return record, err
+ }
+
+ return record, nil
+}
+
+func (slf *ProceduresSearch) Find() ([]*Procedures, int64, error) {
+ var (
+ records = make([]*Procedures, 0)
+ total int64
+ db = slf.build()
+ )
+
+ if err := db.Count(&total).Error; err != nil {
+ return records, total, fmt.Errorf("find count err: %v", err)
+ }
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, total, fmt.Errorf("find records err: %v", err)
+ }
+
+ return records, total, nil
+}
+
+func (slf *ProceduresSearch) FindNotTotal() ([]*Procedures, error) {
+ var (
+ records = make([]*Procedures, 0)
+ db = slf.build()
+ )
+
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, fmt.Errorf("find records err: %v", err)
+ }
+
+ return records, nil
+}
+
+// FindByQuery queries records by a custom condition.
+func (slf *ProceduresSearch) FindByQuery(query string, args []interface{}) ([]*Procedures, int64, error) {
+ var (
+ records = make([]*Procedures, 0)
+ total int64
+ db = slf.Orm.Table(slf.TableName()).Where(query, args...)
+ )
+
+ if err := db.Count(&total).Error; err != nil {
+ return records, total, fmt.Errorf("find by query count err: %v", err)
+ }
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, total, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
+ }
+
+ return records, total, nil
+}
+
+// FindByQueryNotTotal queries records by a custom condition without counting the total.
+func (slf *ProceduresSearch) FindByQueryNotTotal(query string, args []interface{}) ([]*Procedures, error) {
+ var (
+ records = make([]*Procedures, 0)
+ db = slf.Orm.Table(slf.TableName()).Where(query, args...)
+ )
+
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
+ }
+
+ return records, nil
+}
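Note: a minimal caller-side sketch of how ProceduresSearch is intended to be chained (assumed usage, not part of this patch; the "start_time asc" ordering merely illustrates SetOrder):

    package main

    import (
        "fmt"

        "apsClient/model"
    )

    func listProcedures() {
        // Assumes the SQLite store has already been initialised via model.Init.
        // Preload both material associations and fetch the first page of 20 rows.
        records, total, err := model.NewProceduresSearch(nil).
            SetPreload(true).
            SetOrder("start_time asc").
            SetPage(1, 20).
            Find()
        if err != nil {
            fmt.Println("find procedures:", err)
            return
        }
        fmt.Printf("got %d of %d procedures\n", len(records), total)
    }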
diff --git a/model/request/jwt.go b/model/request/jwt.go
deleted file mode 100644
index f16c7f9..0000000
--- a/model/request/jwt.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package request
-
-import (
- "apsClient/constvar"
- "github.com/golang-jwt/jwt/v4"
-)
-
-// Custom claims structure
-type CustomClaims struct {
- BaseClaims
- BufferTime int64
- jwt.StandardClaims
-}
-
-type BaseClaims struct {
- UserId string
- Username string
- ParentId string
- UserType constvar.UserType
-}
diff --git a/model/request/schedule_task.go b/model/request/schedule_task.go
new file mode 100644
index 0000000..43ca48c
--- /dev/null
+++ b/model/request/schedule_task.go
@@ -0,0 +1,50 @@
+package request
+
+import "github.com/shopspring/decimal"
+
+// Schedule task dispatch payload
+type (
+ Order struct {
+		OrderID     string          `gorm:"index;type:varchar(191);not null;comment:order ID" json:"orderId"`
+		ProductID   string          `gorm:"type:varchar(191);comment:product ID" json:"productId"`
+		ProductName string          `gorm:"type:varchar(191);comment:product name" json:"productName"`
+		Parameter   string          `gorm:"type:varchar(1024);comment:parameter requirements" json:"parameter"`
+		Customer    string          `gorm:"type:varchar(191);comment:customer code" json:"customer"`
+		DeliverDate string          `gorm:"type:varchar(100);comment:delivery date" json:"deliverDate"`
+		OrderAttr   string          `json:"orderAttr"` // order attributes joined into one string, i.e. the goods description
+		Amount      decimal.Decimal `gorm:"type:decimal(35,18);comment:quantity" json:"amount"`
+		Unit        string          `gorm:"type:varchar(100);comment:unit" json:"unit"`
+		StartTime   int64           `gorm:"comment:planned start time" json:"startTime"`
+		EndTime     int64           `gorm:"comment:planned end time" json:"endTime"`
+ }
+
+ ProcedureMaterial struct {
+		MaterialID   string          `gorm:"type:varchar(191);comment:material code" json:"materialId"`
+		MaterialName string          `gorm:"unique;type:varchar(191);not null;comment:material name" json:"materialName"`
+		Amount       decimal.Decimal `gorm:"type:decimal(35,18);comment:quantity" json:"amount"`
+		Unit         string          `gorm:"type:varchar(191);comment:unit" json:"unit"`
+ }
+
+ ProcedureWorker struct {
+		WorkerID   string `gorm:"type:varchar(2048);comment:worker ID" json:"workerId"`
+		WorkerName string `gorm:"unique;type:varchar(191);not null;comment:worker name" json:"workerName"`
+		PhoneNum   string `gorm:"type:varchar(191);comment:phone number" json:"phoneNum"`
+ }
+
+ ProductProcedure struct {
+		ProcedureID     string               `gorm:"uniqueIndex:idx_product_procedure;type:varchar(191);comment:procedure ID" json:"procedureId"`
+		ProcedureName   string               `gorm:"type:varchar(191);comment:procedure name, for queries only" json:"procedureName"`
+		DeviceID        string               `gorm:"type:varchar(191);not null;comment:device ID" json:"deviceId"`
+		StartTime       int64                `gorm:"comment:planned start time" json:"startTime"`
+		EndTime         int64                `gorm:"comment:planned end time" json:"endTime"`
+		WorkHours       decimal.Decimal      `gorm:"type:decimal(35,18);comment:work hours" json:"workHours"`
+		InputMaterials  []*ProcedureMaterial `json:"inputMaterials"`  // input material list
+		OutputMaterials []*ProcedureMaterial `json:"outputMaterials"` // output material list
+		Workers         []*ProcedureWorker   `json:"workers"`         // worker list
+ }
+
+ DeliverScheduleTask struct {
+ Order Order `json:"order"`
+		Procedures []*ProductProcedure `json:"procedures"` // procedure list
+ }
+)
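Note: the wire shape these types decode is a JSON array of order-plus-procedures objects (this is what nsq/msg_handler.go below unmarshals). A minimal decoding sketch with made-up field values:

    package main

    import (
        "encoding/json"
        "fmt"

        "apsClient/model/request"
    )

    // payload is illustrative only; the shape follows the types above.
    const payload = `[{
      "order": {"orderId": "0100000", "productId": "PE500A01D/F",
                "amount": "1200", "startTime": 1692328320, "endTime": 1692946800},
      "procedures": [{"procedureId": "P01", "deviceId": "D01",
                      "startTime": 1692328320, "endTime": 1692946800, "workHours": "8"}]
    }]`

    func main() {
        var tasks []*request.DeliverScheduleTask
        if err := json.Unmarshal([]byte(payload), &tasks); err != nil {
            fmt.Println("unmarshal:", err)
            return
        }
        fmt.Println(tasks[0].Order.OrderID, len(tasks[0].Procedures))
    }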
diff --git a/model/request/task.go b/model/request/task.go
index 68b9357..70478a2 100644
--- a/model/request/task.go
+++ b/model/request/task.go
@@ -8,3 +8,8 @@
WorkOrder string `json:"workOrder"` // 宸ュ崟
Device string `json:"device"` // 璁惧
}
+
+// TaskList is the request parameter set for the task list endpoint.
+type TaskList struct {
+ PageInfo
+}
diff --git a/model/request/user.go b/model/request/user.go
index 4b9aab0..f2b3d57 100644
--- a/model/request/user.go
+++ b/model/request/user.go
@@ -1,7 +1,5 @@
package request
-import "apsClient/constvar"
-
type (
Login struct {
Username string `json:"username"` // 鐢ㄦ埛鍚�
@@ -32,12 +30,6 @@
NickName string `json:"nickName"` // 鐢ㄦ埛鏄电О
Phone string `json:"phone"` // 鐢ㄦ埛鎵嬫満鍙�
Pos string `json:"pos"` // 鐢ㄦ埛宀椾綅
- }
-
- Examine struct {
- ID string `json:"id"`
- Status constvar.UserStatus `json:"status"`
- Clusters string `json:"clusters"`
}
GetUserList struct {
diff --git a/model/response/response.go b/model/response/response.go
deleted file mode 100644
index 90b1954..0000000
--- a/model/response/response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package response
-
-import (
- "apsClient/model"
-)
-
-type (
- CaptchaResponse struct {
- CaptchaId string `json:"captchaId"`
- PicPath string `json:"picPath"`
- CaptchaLength int `json:"captchaLength"`
- OpenCaptcha bool `json:"openCaptcha"`
- }
-
- UserResponse struct {
- User model.User `json:"user"`
- }
-
- LoginResponse struct {
- User model.User `json:"user"`
- Token string `json:"token"`
- ExpiresAt int64 `json:"expiresAt"`
- }
-
- MenusResponse struct {
- Menus []*model.Menu `json:"menus"`
- }
-
- MenuResponse struct {
- Menu model.Menu `json:"menu"`
- }
-
- DevicesResponse struct {
- Devices []*model.Device `json:"devices"`
- }
-
- DeviceResponse struct {
- Device model.Device `json:"device"`
- }
-
- ClusterResponse struct {
- Cluster model.Cluster `json:"cluster"`
- }
-
- ClustersResponse struct {
- Clusters []string `json:"clusters"`
- }
-)
diff --git a/model/schedule_task.go b/model/schedule_task.go
new file mode 100644
index 0000000..b8ab664
--- /dev/null
+++ b/model/schedule_task.go
@@ -0,0 +1,260 @@
+package model
+
+import (
+ "apsClient/model/request"
+ "apsClient/pkg/sqlitex"
+ "fmt"
+ "github.com/shopspring/decimal"
+ "gorm.io/gorm"
+)
+
+//{
+//"orderId": "0100000",
+//"productId": "PE500A01D/F",
+//"productName": "鍓嶇汉姣涘竷",
+//"parameter": "",
+//"customer": "1200",
+//"deliverDate": "2023-08-16",
+//"orderAttr": "浜у搧绫诲埆:娑ょ憾",
+//"amount": "1200",
+//"unit": "",
+//"startTime": 1692328320,
+//"endTime": 1692946800
+//},
+
+type (
+ ScheduleTask struct {
+ gorm.Model `json:"-"`
+ Id int `json:"id"`
+		OrderId     string `json:"orderId"`     // order ID
+		ProductId   string `json:"productId"`   // product ID
+		ProductName string `json:"productName"` // product name
+		//Parameter string `json:"parameter"` // parameter requirements
+ //Customer string `json:"customer"`
+ //DeliverDate string `json:"deliverDate"`
+ //OrderAttr string `json:"orderAttr"`
+ Amount decimal.Decimal `json:"amount"`
+ Unit string `json:"unit"`
+ StartTime int64 `json:"startTime"`
+ EndTime int64 `json:"endTime"`
+		Data     string                      `json:"data"`              // schedule task JSON string
+		TaskInfo request.DeliverScheduleTask `json:"taskInfo" gorm:"-"` // parsed schedule task, not persisted
+ //Procedures []*Procedures `json:"Procedures" gorm:"many2many:ScheduleTask_Procedures;"`
+ }
+
+ ScheduleTaskSearch struct {
+ ScheduleTask
+ Order string
+ PageNum int
+ PageSize int
+ Orm *gorm.DB
+ Preload bool
+ }
+)
+
+func (slf *ScheduleTask) TableName() string {
+ return "schedule_task"
+}
+
+func NewScheduleTaskSearch(db *gorm.DB) *ScheduleTaskSearch {
+ if db == nil {
+ db = sqlitex.GetDB()
+ }
+ return &ScheduleTaskSearch{Orm: db}
+}
+
+func (slf *ScheduleTaskSearch) SetOrm(tx *gorm.DB) *ScheduleTaskSearch {
+ slf.Orm = tx
+ return slf
+}
+
+func (slf *ScheduleTaskSearch) SetPage(page, size int) *ScheduleTaskSearch {
+ slf.PageNum, slf.PageSize = page, size
+ return slf
+}
+
+func (slf *ScheduleTaskSearch) SetOrder(order string) *ScheduleTaskSearch {
+ slf.Order = order
+ return slf
+}
+
+func (slf *ScheduleTaskSearch) SetPreload(preload bool) *ScheduleTaskSearch {
+ slf.Preload = preload
+ return slf
+}
+
+func (slf *ScheduleTaskSearch) SetEndTime(endTime int64) *ScheduleTaskSearch {
+ slf.EndTime = endTime
+ return slf
+}
+
+func (slf *ScheduleTaskSearch) build() *gorm.DB {
+ var db = slf.Orm.Model(&ScheduleTask{})
+
+ if slf.Order != "" {
+ db = db.Order(slf.Order)
+ }
+
+ if slf.EndTime > 0 {
+ db = db.Where("end_time < ?", slf.EndTime)
+ }
+
+ //if slf.Preload {
+ // db = db.Preload("Procedures")
+ //}
+
+ return db
+}
+
+// Create inserts a single record
+func (slf *ScheduleTaskSearch) Create(record *ScheduleTask) error {
+ var db = slf.build()
+
+ if err := db.Create(record).Error; err != nil {
+ return fmt.Errorf("create err: %v, record: %+v", err, record)
+ }
+
+ return nil
+}
+
+// CreateBatch inserts records in batch
+func (slf *ScheduleTaskSearch) CreateBatch(records []*ScheduleTask) error {
+ var db = slf.build()
+
+ if err := db.Create(&records).Error; err != nil {
+ return fmt.Errorf("create batch err: %v, records: %+v", err, records)
+ }
+
+ return nil
+}
+
+func (slf *ScheduleTaskSearch) Save(record *ScheduleTask) error {
+ var db = slf.build()
+
+ if err := db.Save(record).Error; err != nil {
+ return fmt.Errorf("save err: %v, record: %+v", err, record)
+ }
+
+ return nil
+}
+
+func (slf *ScheduleTaskSearch) UpdateByMap(upMap map[string]interface{}) error {
+ var (
+ db = slf.build()
+ )
+
+ if err := db.Updates(upMap).Error; err != nil {
+ return fmt.Errorf("update by map err: %v, upMap: %+v", err, upMap)
+ }
+
+ return nil
+}
+
+func (slf *ScheduleTaskSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
+ var (
+ db = slf.Orm.Table(slf.TableName()).Where(query, args...)
+ )
+
+ if err := db.Updates(upMap).Error; err != nil {
+ return fmt.Errorf("update by query err: %v, query: %s, args: %+v, upMap: %+v", err, query, args, upMap)
+ }
+
+ return nil
+}
+
+func (slf *ScheduleTaskSearch) Delete() error {
+ var db = slf.build()
+
+ if err := db.Unscoped().Delete(&ScheduleTask{}).Error; err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (slf *ScheduleTaskSearch) First() (*ScheduleTask, error) {
+ var (
+ record = new(ScheduleTask)
+ db = slf.build()
+ )
+
+ if err := db.First(record).Error; err != nil {
+ return record, err
+ }
+
+ return record, nil
+}
+
+func (slf *ScheduleTaskSearch) Find() ([]*ScheduleTask, int64, error) {
+ var (
+ records = make([]*ScheduleTask, 0)
+ total int64
+ db = slf.build()
+ )
+
+ if err := db.Count(&total).Error; err != nil {
+ return records, total, fmt.Errorf("find count err: %v", err)
+ }
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, total, fmt.Errorf("find records err: %v", err)
+ }
+
+ return records, total, nil
+}
+
+func (slf *ScheduleTaskSearch) FindNotTotal() ([]*ScheduleTask, error) {
+ var (
+ records = make([]*ScheduleTask, 0)
+ db = slf.build()
+ )
+
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, fmt.Errorf("find records err: %v", err)
+ }
+
+ return records, nil
+}
+
+// FindByQuery queries records by a custom condition.
+func (slf *ScheduleTaskSearch) FindByQuery(query string, args []interface{}) ([]*ScheduleTask, int64, error) {
+ var (
+ records = make([]*ScheduleTask, 0)
+ total int64
+ db = slf.Orm.Table(slf.TableName()).Where(query, args...)
+ )
+
+ if err := db.Count(&total).Error; err != nil {
+ return records, total, fmt.Errorf("find by query count err: %v", err)
+ }
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, total, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
+ }
+
+ return records, total, nil
+}
+
+// FindByQueryNotTotal queries records by a custom condition without counting the total.
+func (slf *ScheduleTaskSearch) FindByQueryNotTotal(query string, args []interface{}) ([]*ScheduleTask, error) {
+ var (
+ records = make([]*ScheduleTask, 0)
+ db = slf.Orm.Table(slf.TableName()).Where(query, args...)
+ )
+
+ if slf.PageNum*slf.PageSize > 0 {
+ db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
+ }
+ if err := db.Find(&records).Error; err != nil {
+ return records, fmt.Errorf("find by query records err: %v, query: %s, args: %+v", err, query, args)
+ }
+
+ return records, nil
+}
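Note: a usage sketch for ScheduleTaskSearch (assumed caller code, not part of this patch). SetEndTime feeds the "end_time < ?" filter in build(), so this pages through tasks whose planned end time has already passed:

    package main

    import (
        "fmt"
        "time"

        "apsClient/model"
    )

    func expiredTasks() {
        // Assumes the SQLite store has already been initialised via model.Init.
        records, total, err := model.NewScheduleTaskSearch(nil).
            SetEndTime(time.Now().Unix()).
            SetPage(1, 20).
            Find()
        if err != nil {
            fmt.Println("find schedule tasks:", err)
            return
        }
        fmt.Printf("%d of %d tasks already past their end time\n", len(records), total)
    }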
diff --git a/model/sqlite.go b/model/sqlite.go
new file mode 100644
index 0000000..8b53790
--- /dev/null
+++ b/model/sqlite.go
@@ -0,0 +1 @@
+package model
diff --git a/model/user.go b/model/user.go
deleted file mode 100644
index 3b931e9..0000000
--- a/model/user.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package model
-
-import (
- "apsClient/constvar"
- "apsClient/pkg/mysqlx"
- "fmt"
- "gorm.io/gorm"
- "time"
-)
-
-type (
- // User token閲岃竟鎶婄敤鎴稩D銆佺埗鐢ㄦ埛ID銆佽鑹查兘甯︿笂
- User struct {
- ID string `json:"id" gorm:"primaryKey;type:varchar(255);comment:鐢ㄦ埛ID"`
- Username string `json:"username" gorm:"index;type:varchar(255);comment:鐢ㄦ埛鐧诲綍鍚�"`
- UserType constvar.UserType `json:"userType" gorm:"type:int(11);comment:鐢ㄦ埛绫诲瀷 1瓒呯骇绠$悊鍛� 2涓昏处鎴� 3瀛愯处鎴�"`
- Password string `json:"-" gorm:"type:varchar(255);comment:鐢ㄦ埛鐧诲綍瀵嗙爜"`
- NickName string `json:"nickName" gorm:"type:varchar(255);default:绯荤粺鐢ㄦ埛;comment:鐢ㄦ埛鏄电О"`
- HeaderImage string `json:"headerImage" gorm:"type:mediumtext;comment:鐢ㄦ埛澶村儚"`
- Phone string `json:"phone" gorm:"type:varchar(255);comment:鐢ㄦ埛鎵嬫満鍙�"`
- Enable bool `json:"enable" gorm:"type:tinyint(1);comment:鐢ㄦ埛鏄惁琚喕缁�"`
- ParentId string `json:"parentId" gorm:"type:varchar(255);comment:鐖剁敤鎴稩D"`
- ParentName string `json:"parentName" gorm:"type:varchar(255);comment:鐖剁敤鎴峰悕绉�"`
- CompanyName string `json:"companyName" gorm:"type:varchar(255);comment:鍏徃鍚嶇О"`
- CompanyEmail string `json:"companyEmail" gorm:"type:varchar(255);comment:鍏徃閭"`
- CompanyContact string `json:"companyContact" gorm:"type:varchar(255);comment:鍏徃鑱旂郴浜哄鍚�"`
- CompanyProvince string `json:"companyProvince" gorm:"type:varchar(255);comment:鍏徃鎵�鍦ㄧ渷"`
- CompanyCity string `json:"companyCity" gorm:"type:varchar(255);comment:鍏徃鎵�鍦ㄥ競"`
- CompanyTrade string `json:"companyTrade" gorm:"type:varchar(255);comment:鍏徃琛屼笟"`
- Pos string `json:"pos" gorm:"type:varchar(255);comment:宀椾綅"`
- ModifiedPwd bool `json:"-" gorm:"type:tinyint(1);comment:鏄惁鏀硅繃瀵嗙爜"`
- CreateTime int64 `json:"-" gorm:"type:bigint(20);comment:鍒涘缓鏃堕棿"`
- UpdateTime int64 `json:"-" gorm:"type:bigint(20);comment:鏇存柊鏃堕棿"`
- Menus []Menu `json:"-" gorm:"many2many:user_menu;"` // 鐢ㄦ埛鑿滃崟
- CreateAt string `json:"createAt" gorm:"-"` // 鍒涘缓鏃堕棿
- UpdateAt string `json:"updateAt" gorm:"-"` // 鏇存柊鏃堕棿
- MenuIds []uint `json:"menuIds" gorm:"-"` // 鑿滃崟ID鍒楄〃
- Ip string `json:"ip" gorm:"type:varchar(255);comment:闆嗙兢Ip"`
- Port string `json:"port" gorm:"type:varchar(255);comment:绔彛鍙�"`
- Status int `json:"status" gorm:"type:int(11);comment:鐢ㄦ埛瀹℃牳鐘舵�� 0:绂佺敤; 1:姝e父; 2:瀹℃牳涓�"`
- CompanyLogo string `json:"companyLogo" gorm:"type:mediumtext;comment:鍏徃logo"`
- SystemName string `json:"systemName" gorm:"type:varchar(255);comment:绯荤粺鍚嶇О"`
- }
-
- UserSearch struct {
- User
- Keyword string // 妯$硦鏌ヨ鍏抽敭瀛�
- Order string
- PageNum int
- PageSize int
- Orm *gorm.DB
- }
-)
-
-func (slf User) TableName() string {
- return "user"
-}
-
-func (slf *User) BeforeCreate(tx *gorm.DB) error {
- slf.CreateTime = time.Now().Unix()
- slf.UpdateTime = slf.CreateTime
- return nil
-}
-
-func (slf *User) BeforeSave(tx *gorm.DB) error {
- slf.UpdateTime = time.Now().Unix()
- return nil
-}
-
-func (slf *User) BeforeUpdate(tx *gorm.DB) error {
- slf.UpdateTime = time.Now().Unix()
- return nil
-}
-
-func (slf *User) AfterFind(tx *gorm.DB) error {
- slf.CreateAt = time.Unix(slf.CreateTime, 0).Format("2006-01-02 15:04:05")
- slf.UpdateAt = time.Unix(slf.UpdateTime, 0).Format("2006-01-02 15:04:05")
- return nil
-}
-
-func NewUserSearch(db *gorm.DB) *UserSearch {
- if db == nil {
- db = mysqlx.GetDB()
- }
- return &UserSearch{Orm: db}
-}
-
-func (slf *UserSearch) SetOrm(tx *gorm.DB) *UserSearch {
- slf.Orm = tx
- return slf
-}
-
-func (slf *UserSearch) SetPage(page, size int) *UserSearch {
- slf.PageNum, slf.PageSize = page, size
- return slf
-}
-
-func (slf *UserSearch) SetOrder(order string) *UserSearch {
- slf.Order = order
- return slf
-}
-
-func (slf *UserSearch) SetId(id string) *UserSearch {
- slf.ID = id
- return slf
-}
-
-func (slf *UserSearch) SetParentId(parentId string) *UserSearch {
- slf.ParentId = parentId
- return slf
-}
-
-func (slf *UserSearch) SetParentName(parentName string) *UserSearch {
- slf.ParentName = parentName
- return slf
-}
-
-func (slf *UserSearch) SetUserName(username string) *UserSearch {
- slf.Username = username
- return slf
-}
-
-func (slf *UserSearch) SetKeyword(keyword string) *UserSearch {
- slf.Keyword = keyword
- return slf
-}
-
-func (slf *UserSearch) build() *gorm.DB {
- var db = slf.Orm.Model(&User{}).Preload("Menus")
-
- if slf.ID != "" {
- db = db.Where("id = ?", slf.ID)
- }
-
- if slf.Username != "" {
- db = db.Where("username = ?", slf.Username)
- }
-
- if slf.ParentName != "" {
- db = db.Where("parent_name = ?", slf.ParentName)
- }
-
- if slf.Keyword != "" {
- db = db.Where("nick_name LIKE ? or phone LIKE ?", "%"+slf.Keyword+"%", "%"+slf.Keyword+"%")
- }
-
- if slf.Order != "" {
- db = db.Order(slf.Order)
- }
-
- return db
-}
-
-// Create 鍗曟潯鎻掑叆
-func (slf *UserSearch) Create(record *User) error {
- var db = slf.build()
-
- if err := db.Create(record).Error; err != nil {
- return fmt.Errorf("create err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-// CreateBatch 鎵归噺鎻掑叆
-func (slf *UserSearch) CreateBatch(records []*User) error {
- var db = slf.build()
-
- if err := db.Create(&records).Error; err != nil {
- return fmt.Errorf("create batch err: %v, records: %+v", err, records)
- }
-
- return nil
-}
-
-func (slf *UserSearch) Save(record *User) error {
- var db = slf.build()
-
- if err := db.Save(record).Error; err != nil {
- return fmt.Errorf("save err: %v, record: %+v", err, record)
- }
-
- return nil
-}
-
-func (slf *UserSearch) UpdateByMap(upMap map[string]interface{}) error {
- var (
- db = slf.build()
- )
-
- if err := db.Updates(upMap).Error; err != nil {
- return fmt.Errorf("update by map err: %v, upMap: %+v", err, upMap)
- }
-
- return nil
-}
-
-func (slf *UserSearch) UpdateByQuery(query string, args []interface{}, upMap map[string]interface{}) error {
- var (
- db = slf.Orm.Table(slf.TableName()).Where(query, args...)
- )
-
- if err := db.Updates(upMap).Error; err != nil {
- return fmt.Errorf("update by query err: %v, query: %s, args: %+v, upMap: %+v", err, query, args, upMap)
- }
-
- return nil
-}
-
-func (slf *UserSearch) Delete() error {
- var db = slf.build()
-
- if err := db.Unscoped().Delete(&User{}).Error; err != nil {
- return err
- }
-
- return nil
-}
-
-func (slf *UserSearch) First() (*User, error) {
- var (
- record = new(User)
- db = slf.build()
- )
-
- if err := db.First(record).Error; err != nil {
- return record, err
- }
-
- return record, nil
-}
-
-func (slf *UserSearch) Find() ([]*User, int64, error) {
- var (
- records = make([]*User, 0)
- total int64
- db = slf.build()
- )
-
- if err := db.Count(&total).Error; err != nil {
- return records, total, fmt.Errorf("find count err: %v", err)
- }
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Omit("company_Logo").Find(&records).Error; err != nil {
- return records, total, fmt.Errorf("find records err: %v", err)
- }
-
- return records, total, nil
-}
-
-func (slf *UserSearch) FindNotTotal() ([]*User, error) {
- var (
- records = make([]*User, 0)
- db = slf.build()
- )
-
- if slf.PageNum*slf.PageSize > 0 {
- db = db.Offset((slf.PageNum - 1) * slf.PageSize).Limit(slf.PageSize)
- }
- if err := db.Omit("company_Logo").Find(&records).Error; err != nil {
- return records, fmt.Errorf("find records err: %v", err)
- }
-
- return records, nil
-}
-
-func (slf *UserSearch) ReplaceMenu(user *User, menus []*Menu) error {
- return slf.Orm.Model(user).Association("Menus").Replace(menus)
-}
diff --git a/nsq/consumer.go b/nsq/consumer.go
new file mode 100644
index 0000000..0f0bbdc
--- /dev/null
+++ b/nsq/consumer.go
@@ -0,0 +1,39 @@
+package nsq
+
+import (
+ "apsClient/conf"
+ "apsClient/pkg/logx"
+ "apsClient/pkg/nsqclient"
+ "context"
+ "fmt"
+)
+
+func Consume(topic, channel string) (err error) {
+ c, err := nsqclient.NewNsqConsumer(context.Background(), topic, channel)
+ if err != nil {
+ logx.Errorf("NewNsqConsumer err:%v", err)
+ return
+ }
+ logx.Infof("Consume NewNsqConsumer topic:%v", topic)
+ var handler MsgHandler
+ switch topic {
+ case fmt.Sprintf("aps.%v.scheduleTask", conf.Conf.NsqConf.NodeId):
+ handler = new(ScheduleTask)
+	default:
+		return fmt.Errorf("no message handler registered for topic: %s", topic)
+	}
+	c.AddHandler(handler.HandleMessage)
+
+ if len(conf.Conf.NsqConf.NsqlookupdAddr) > 0 {
+ if err = c.RunLookupd(conf.Conf.NsqConf.NsqlookupdAddr, 1); err != nil {
+ logx.Errorf("RunLookupd err:%v", err)
+ return
+ }
+ } else {
+ if err = c.Run(conf.Conf.NsqConf.NsqdAddr, 1); err != nil {
+ logx.Errorf("Run err:%v", err)
+ return
+ }
+ }
+ return
+}
diff --git a/nsq/model.go b/nsq/model.go
new file mode 100644
index 0000000..693dd18
--- /dev/null
+++ b/nsq/model.go
@@ -0,0 +1,52 @@
+package nsq
+
+import (
+ "github.com/shopspring/decimal"
+)
+
+// Schedule task dispatch payload
+type (
+ Order struct {
+		OrderID     string          `gorm:"index;type:varchar(191);not null;comment:order ID" json:"orderId"`
+		ProductID   string          `gorm:"type:varchar(191);comment:product ID" json:"productId"`
+		ProductName string          `gorm:"type:varchar(191);comment:product name" json:"productName"`
+		Parameter   string          `gorm:"type:varchar(1024);comment:parameter requirements" json:"parameter"`
+		Customer    string          `gorm:"type:varchar(191);comment:customer code" json:"customer"`
+		DeliverDate string          `gorm:"type:varchar(100);comment:delivery date" json:"deliverDate"`
+		OrderAttr   string          `json:"orderAttr"` // order attributes joined into one string, i.e. the goods description
+		Amount      decimal.Decimal `gorm:"type:decimal(35,18);comment:quantity" json:"amount"`
+		Unit        string          `gorm:"type:varchar(100);comment:unit" json:"unit"`
+		StartTime   int64           `gorm:"comment:planned start time" json:"startTime"`
+		EndTime     int64           `gorm:"comment:planned end time" json:"endTime"`
+ }
+
+ ProcedureMaterial struct {
+		MaterialID   string          `gorm:"type:varchar(191);comment:material code" json:"materialId"`
+		MaterialName string          `gorm:"unique;type:varchar(191);not null;comment:material name" json:"materialName"`
+		Amount       decimal.Decimal `gorm:"type:decimal(35,18);comment:quantity" json:"amount"`
+		Unit         string          `gorm:"type:varchar(191);comment:unit" json:"unit"`
+ }
+
+ ProcedureWorker struct {
+		WorkerID   string `gorm:"type:varchar(2048);comment:worker ID" json:"workerId"`
+		WorkerName string `gorm:"unique;type:varchar(191);not null;comment:worker name" json:"workerName"`
+		PhoneNum   string `gorm:"type:varchar(191);comment:phone number" json:"phoneNum"`
+ }
+
+ ProductProcedure struct {
+ ProcedureID string `gorm:"uniqueIndex:idx_product_procedure;type:varchar(191);comment:宸ュ簭ID" json:"procedureId"`
+ ProcedureName string `gorm:"type:varchar(191);comment:宸ュ簭鍚嶇О锛屼粎鏌ヨ鐢�" json:"procedureName"`
+ DeviceID string `gorm:"type:varchar(191);not null;comment:璁惧ID" json:"deviceId"`
+ StartTime int64 `gorm:"comment:璁″垝寮�濮嬫椂闂�" json:"startTime"`
+ EndTime int64 `gorm:"comment:璁″垝缁撴潫鏃堕棿" json:"endTime"`
+ WorkHours decimal.Decimal `gorm:"type:decimal(35,18);comment:宸ユ椂" json:"workHours"`
+ InputMaterials []*ProcedureMaterial `json:"inputMaterials"` // 杈撳叆鐗╂枡鍒楄〃
+ OutputMaterials []*ProcedureMaterial `json:"outputMaterials"` // 杈撳嚭鐗╂枡鍒楄〃
+ Workers []*ProcedureWorker `json:"workers"` // 浜哄憳鍒楄〃
+ }
+
+ DeliverScheduleTask struct {
+ Order Order `json:"order"`
+ Procedures []*ProductProcedure `json:"procedures"` // 宸ュ簭鍒楄〃
+ }
+)
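For reference, the consumer expects a JSON array of these tasks on the wire. A small sketch that marshals one task to show the payload shape (all IDs and values are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"apsClient/nsq"
	"github.com/shopspring/decimal"
)

func main() {
	tasks := []*nsq.DeliverScheduleTask{{
		Order: nsq.Order{
			OrderID:   "SO-001", // illustrative values
			ProductID: "P-100",
			Amount:    decimal.NewFromInt(10),
			Unit:      "pcs",
			StartTime: 1692072000,
			EndTime:   1692158400,
		},
		Procedures: []*nsq.ProductProcedure{{
			ProcedureID: "OP-10",
			DeviceID:    "D-01",
			WorkHours:   decimal.NewFromFloat(2.5),
		}},
	}}
	payload, _ := json.Marshal(tasks)
	fmt.Println(string(payload)) // what gets published to aps.<nodeId>.scheduleTask
}
```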
diff --git a/nsq/msg_handler.go b/nsq/msg_handler.go
new file mode 100644
index 0000000..495b6ef
--- /dev/null
+++ b/nsq/msg_handler.go
@@ -0,0 +1,51 @@
+package nsq
+
+import (
+	"apsClient/model"
+	"apsClient/pkg/logx"
+	"encoding/json"
+)
+
+type MsgHandler interface {
+ HandleMessage(data []byte) (err error)
+}
+
+type ScheduleTask struct {
+}
+
+func (slf *ScheduleTask) HandleMessage(data []byte) (err error) {
+	logx.Infof("ScheduleTask HandleMessage recv: %s", data)
+
+ var tasks = make([]*DeliverScheduleTask, 0)
+
+ err = json.Unmarshal(data, &tasks)
+ if err != nil {
+ logx.Errorf("ScheduleTask HandleMessage Unmarshal json err: %v", err.Error())
+ return err
+ }
+ for _, task := range tasks {
+ taskRecord := model.ScheduleTask{
+ Id: 0,
+ OrderId: task.Order.OrderID,
+ ProductId: task.Order.ProductID,
+ ProductName: task.Order.ProductName,
+ Amount: task.Order.Amount,
+ Unit: task.Order.Unit,
+ StartTime: task.Order.StartTime,
+ EndTime: task.Order.EndTime,
+ }
+ jsonStr, err := json.Marshal(task)
+ if err != nil {
+ logx.Errorf("ScheduleTask HandleMessage Marshal err: %v, old: %#v", err.Error(), task)
+ return err
+ }
+ taskRecord.Data = string(jsonStr)
+ err = model.NewScheduleTaskSearch(nil).Create(&taskRecord)
+ if err != nil {
+ logx.Errorf("ScheduleTask HandleMessage Create taskRecord err: %v, record: %#v", err.Error(), taskRecord)
+ return err
+ }
+ }
+ return nil
+}
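HandleMessage can be exercised without a running nsqd by feeding it a hand-written payload. A sketch, assuming the sqlite layer has already been initialised so the Create call can persist the row:

```go
package main

import (
	"log"

	"apsClient/nsq"
)

func main() {
	// One task with an empty procedure list; amounts unmarshal into decimal.Decimal.
	payload := []byte(`[{"order":{"orderId":"SO-001","productId":"P-100","amount":"10","unit":"pcs","startTime":1692072000,"endTime":1692158400},"procedures":[]}]`)
	h := new(nsq.ScheduleTask)
	if err := h.HandleMessage(payload); err != nil {
		log.Fatalf("handle message: %v", err)
	}
}
```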
diff --git a/nsq/nsq.go b/nsq/nsq.go
new file mode 100644
index 0000000..c4bdcf2
--- /dev/null
+++ b/nsq/nsq.go
@@ -0,0 +1,24 @@
+package nsq
+
+import (
+ "apsClient/conf"
+ "apsClient/pkg/safe"
+ "errors"
+ "fmt"
+)
+
+func Init() error {
+	if len(conf.Conf.NsqConf.NodeId) == 0 {
+ return errors.New("no NodeId")
+ }
+
+ if err := initProducer(); err != nil {
+ return err
+ }
+
+ safe.Go(func() {
+ _ = Consume(fmt.Sprintf("aps.%v.scheduleTask", conf.Conf.NsqConf.NodeId), "sensor01")
+ })
+
+ return nil
+}
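Init is the single entry point for the messaging layer. A sketch of the expected call order at startup (config loading is assumed to happen before this):

```go
package main

import (
	"log"

	"apsClient/nsq"
)

func main() {
	// conf.Conf must already be populated; Init validates NodeId, builds the
	// producer pool, then starts the schedule-task consumer in the background.
	if err := nsq.Init(); err != nil {
		log.Fatalf("nsq init: %v", err)
	}
}
```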
diff --git a/nsq/producer.go b/nsq/producer.go
new file mode 100644
index 0000000..d7b80b2
--- /dev/null
+++ b/nsq/producer.go
@@ -0,0 +1,60 @@
+package nsq
+
+import (
+ "apsClient/conf"
+ "apsClient/pkg/logx"
+ "apsClient/pkg/nsqclient"
+)
+
+var producer nsqclient.Producer
+
+func GetProducer() nsqclient.Producer {
+ return producer
+}
+
+func initProducer() (err error) {
+ producer, err = nsqclient.NewProducer(conf.Conf.NsqConf.NsqdAddr)
+ if err != nil {
+ logx.Errorf("NewProducer err:%v", err)
+ return err
+ }
+	// Example publish loop for manual testing, kept for reference:
+	//go func() {
+	//	for {
+	//		time.Sleep(time.Second)
+	//		_ = producer.Publish("test", []byte("123"))
+	//	}
+	//}()
+ return nil
+}
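With the pool created by initProducer, publishing from anywhere in the process goes through GetProducer. A sketch (topic name and payload are invented):

```go
package main

import (
	"log"

	"apsClient/nsq"
)

func main() {
	p := nsq.GetProducer() // nil until nsq.Init/initProducer has run
	if err := p.Publish("aps.node-1.scheduleTask.reply", []byte(`{"ok":true}`)); err != nil {
		log.Printf("publish: %v", err)
	}
}
```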
diff --git a/pkg/contextx/contextx.go b/pkg/contextx/contextx.go
index c4350a9..5531fe7 100644
--- a/pkg/contextx/contextx.go
+++ b/pkg/contextx/contextx.go
@@ -18,6 +18,12 @@
Data interface{} `json:"data"`
Msg string `json:"msg"`
}
+ ResponseList struct {
+ Code int `json:"code"`
+ Data interface{} `json:"data"`
+ Msg string `json:"msg"`
+ Total int64 `json:"total"`
+ }
)
func NewContext(ctx *gin.Context, params interface{}) (r *Context, isAllow bool) {
@@ -71,6 +77,13 @@
})
}
+func (slf *Context) ResultList(data interface{}, total int64) {
+	slf.ctx.JSON(http.StatusOK, ResponseList{
+		Code:  ecode.OK,
+		Data:  data,
+		Total: total,
+	})
+}
+
func (slf *Context) Ok() {
slf.Result(ecode.OK, map[string]interface{}{}, "")
}
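ResultList gives list endpoints a flat envelope with the total row count for pagination. A sketch of the resulting JSON (field values invented; the success code is assumed to be ecode.OK, as in Result):

```go
package main

import (
	"encoding/json"
	"fmt"

	"apsClient/pkg/contextx"
)

func main() {
	resp := contextx.ResponseList{
		Code:  200, // placeholder; the real value comes from ecode.OK
		Data:  []string{"task-1", "task-2"},
		Msg:   "ok",
		Total: 2,
	}
	b, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(b)) // {"code":200,"data":[...],"msg":"ok","total":2}
}
```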
diff --git a/pkg/nsqclient/README.md b/pkg/nsqclient/README.md
new file mode 100644
index 0000000..432d86d
--- /dev/null
+++ b/pkg/nsqclient/README.md
@@ -0,0 +1,54 @@
+# NSQPool
+
+NSQPool is a thread-safe connection pool for NSQ producers. It can be used to
+manage and reuse NSQ producer connections.
+
+
+## Install and Usage
+
+Install the package with:
+
+```bash
+go get github.com/qgymje/nsqpool
+```
+
+Import it with:
+
+```go
+import (
+ "github.com/qgymje/nsqpool"
+ nsq "github.com/nsqio/go-nsq"
+)
+```
+
+and use `pool` as the package name inside the code.
+
+## Example
+
+```go
+// create a factory() to be used with channel based pool
+factory := func() (*nsq.Producer, error) {
+ config := nsq.NewConfig()
+ return nsq.NewProducer(":4150", config)
+}
+
+nsqPool, err := pool.NewChannelPool(5, 30, factory)
+
+producer, err := nsqPool.Get()
+
+producer.Publish("topic", []byte("some data"))
+// do something with the producer, then put it back into the pool by closing it
+// (Close() doesn't stop the underlying producer; it returns it to the pool).
+producer.Close()
+
+// close pool any time you want, this closes all the connections inside a pool
+nsqPool.Close()
+
+// currently available connections in the pool
+current := nsqPool.Len()
+```
+
+## License
+
+The MIT License (MIT) - see LICENSE for more details
diff --git a/pkg/nsqclient/channel.go b/pkg/nsqclient/channel.go
new file mode 100644
index 0000000..1594dcc
--- /dev/null
+++ b/pkg/nsqclient/channel.go
@@ -0,0 +1,134 @@
+package nsqclient
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ nsq "github.com/nsqio/go-nsq"
+)
+
+// channelPool implements the Pool interface based on buffered channels.
+type channelPool struct {
+	// storage for our producer connections
+ mu sync.Mutex
+ conns chan *nsq.Producer
+
+	// producer connection generator
+ factory Factory
+}
+
+// Factory is a function to create new connections.
+type Factory func() (*nsq.Producer, error)
+
+// NewChannelPool returns a new pool based on buffered channels with an initial
+// capacity and maximum capacity. Factory is used when the initial capacity is
+// greater than zero to fill the pool. A zero initialCap doesn't fill the pool
+// until Get() is called. During a Get(), if there is no connection available
+// in the pool, a new one will be created via the Factory() method.
+func NewChannelPool(initialCap, maxCap int, factory Factory) (Pool, error) {
+ if initialCap < 0 || maxCap <= 0 || initialCap > maxCap {
+ return nil, errors.New("invalid capacity settings")
+ }
+
+ c := &channelPool{
+ conns: make(chan *nsq.Producer, maxCap),
+ factory: factory,
+ }
+
+	// create initial connections; if something goes wrong,
+	// close the pool and error out.
+ for i := 0; i < initialCap; i++ {
+ conn, err := factory()
+ if err != nil {
+ c.Close()
+ return nil, fmt.Errorf("factory is not able to fill the pool: %s", err)
+ }
+ c.conns <- conn
+ }
+
+ return c, nil
+}
+
+func (c *channelPool) getConns() chan *nsq.Producer {
+ c.mu.Lock()
+ conns := c.conns
+ c.mu.Unlock()
+ return conns
+}
+
+// Get implements the Pool interfaces Get() method. If there is no new
+// connection available in the pool, a new connection will be created via the
+// Factory() method.
+func (c *channelPool) Get() (*PoolConn, error) {
+ conns := c.getConns()
+ if conns == nil {
+ return nil, ErrClosed
+ }
+
+	// wrap the connection in our custom PoolConn implementation (wrapConn
+	// method) that puts the connection back into the pool when it is closed.
+ select {
+ case conn := <-conns:
+ if conn == nil {
+ return nil, ErrClosed
+ }
+
+ return c.wrapConn(conn), nil
+ default:
+ conn, err := c.factory()
+ if err != nil {
+ return nil, err
+ }
+
+ return c.wrapConn(conn), nil
+ }
+}
+
+// put puts the connection back to the pool. If the pool is full or closed,
+// conn is simply closed. A nil conn will be rejected.
+func (c *channelPool) put(conn *nsq.Producer) error {
+ if conn == nil {
+ return errors.New("connection is nil. rejecting")
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.conns == nil {
+ // pool is closed, close passed connection
+ conn.Stop()
+ return nil
+ }
+
+	// put the resource back into the pool. If the pool is full, the send
+	// fails and the default case closes the connection.
+ select {
+ case c.conns <- conn:
+ return nil
+ default:
+ // pool is full, close passed connection
+ conn.Stop()
+ return nil
+ }
+}
+
+func (c *channelPool) Close() {
+ c.mu.Lock()
+ conns := c.conns
+ c.conns = nil
+ c.factory = nil
+ c.mu.Unlock()
+
+ if conns == nil {
+ return
+ }
+
+ close(conns)
+ for conn := range conns {
+ conn.Stop()
+ }
+}
+
+func (c *channelPool) Len() int { return len(c.getConns()) }
diff --git a/pkg/nsqclient/cmd/TST/ctest/ctest.cpp b/pkg/nsqclient/cmd/TST/ctest/ctest.cpp
new file mode 100644
index 0000000..b435012
--- /dev/null
+++ b/pkg/nsqclient/cmd/TST/ctest/ctest.cpp
@@ -0,0 +1,101 @@
+#include <stdio.h>
+#include <string.h>
+#include "clib/libnsqclient.h"
+
+#include <string>
+#include <thread>
+#include <chrono>
+#include <mutex>
+
+using namespace std;
+
+static void produce(int two){
+ char ip[] = "192.168.20.108:4150";
+ GoString addr = {ip, (ptrdiff_t)strlen(ip)};
+ void* p = createProducer(addr);
+
+ string msg("cnsqclient dynamic library");
+ while(msg.size() < 32){
+ msg += msg;
+ }
+ // printf("msg %s\n", msg.c_str());
+
+ for(int i = 0; i < 1000000; i++){
+ GoString topic = {"test", 4};
+ string amsg = msg + "-x";
+ GoSlice data{(void*)amsg.data(), (GoInt)amsg.size(), (GoInt)amsg.size()};
+ if (!publish(p, topic, data)){
+ printf("publish msg failed topic %s\n", topic.p);
+ exit(0);
+ }
+
+ if (two){
+ topic.p = "test2";
+ topic.n = 5;
+ amsg = msg + "-y";
+ data.data = (void*)amsg.data();
+ if (!publish(p, topic, data)){
+ printf("publish msg failed topic %s\n", topic.p);
+ exit(0);
+ }
+ }
+
+ }
+ destroyProducer(p);
+}
+
+
+static void consume(const char* topic, const char* channel){
+ GoString t = {topic, (ptrdiff_t)strlen(topic)};
+ GoString c = {channel, (ptrdiff_t)strlen(channel)};
+
+ void* con = createConsumer(t, c);
+
+
+ // thread
+ thread([&con]{
+
+ // char ip[] = "192.168.20.108:4150";
+ // GoString addr = {ip, (ptrdiff_t)strlen(ip)};
+ // Run(con, addr);
+
+ char lip[] = "192.168.20.108:4161";
+ GoString laddr = {lip, (ptrdiff_t)strlen(lip)};
+ RunLookupd(con, laddr);
+
+ }).detach();
+
+ auto start = chrono::steady_clock::now();
+ int count = 0;
+ while (true) {
+ void* msg = NULL;
+ size_t size = 0;
+ GoUint8 ok = getMessage(con, &msg, &size);
+ if (!ok){
+ this_thread::sleep_for(chrono::milliseconds(100));
+ continue;
+ }
+ count++;
+ printf("======>> recv msg %s size %d\n", (char*)msg, count);
+ relMessage(msg);
+        if (count > 999000){
+            printf("======>> use time %ld\n",
+                (long)chrono::duration_cast<chrono::seconds>(chrono::steady_clock::now()-start).count());
+            break;
+        }
+    }
+    printf("======>> recv all msg size %d\n", count);
+}
+
+int main(int argc, char const *argv[])
+{
+ bool two = false;
+
+ thread([two]{
+ produce(two);
+ }).detach();
+
+ if (two) thread([]{ consume("test2", "sensor01"); }).detach();
+
+ consume("test", "sensor01");
+
+ return 0;
+}
\ No newline at end of file
diff --git a/pkg/nsqclient/cmd/TST/test/test.go b/pkg/nsqclient/cmd/TST/test/test.go
new file mode 100644
index 0000000..534491f
--- /dev/null
+++ b/pkg/nsqclient/cmd/TST/test/test.go
@@ -0,0 +1,78 @@
+package test
+
+import (
+ "basic.com/aps/nsqclient.git"
+ "context"
+ "fmt"
+ "log"
+ "time"
+)
+
+func produce(two bool) {
+ p, _ := nsqclient.NewProducer("192.168.20.108:4150")
+
+ var str string
+ for len(str) < 32 {
+ str += "cnsqclient dynamic library"
+ }
+ msgx := []byte(str + "--x")
+ msgy := []byte(str + "--y")
+
+ // count := 0
+ for i := 0; i < 1000000; i++ {
+ // if e := p.Publish("test", []byte("x")); e != nil {
+ if e := p.Publish("test", msgx); e != nil {
+ log.Fatal("Publish error:" + e.Error())
+ }
+
+ if two {
+ // if e := p.Publish("test", []byte("y")); e != nil {
+ if e := p.Publish("test2", msgy); e != nil {
+ log.Fatal("Publish error:" + e.Error())
+ }
+ }
+
+ // log.Println("send time ", count)
+ // count++
+ }
+}
+
+func consume(topic, channel string) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ if c, e := nsqclient.NewNsqConsumer(ctx, topic, channel); e != nil {
+ fmt.Println("NewNsqConsumer failed", e)
+ return
+ } else {
+ ch := make(chan struct{})
+
+ count := 0
+ c.AddHandler(func(data []byte) error {
+ count++
+ fmt.Println("recv msg ", string(data), " size", count)
+ if count > 999000 {
+ ch <- struct{}{}
+ }
+ return nil
+ })
+
+ // go c.Run("192.168.20.108:4150", 2)
+ go c.RunLookupd("192.168.20.108:4161", 2)
+
+ t := time.Now()
+ <-ch
+ // fmt.Println("======>> use time ", time.Since(t))
+ fmt.Println("======>> use time ", time.Now().Unix()-t.Unix())
+ cancel()
+ }
+}
+
+func Test() {
+ two := false
+ go produce(two)
+
+ if two {
+ go consume("test2", "sensor01")
+ }
+ consume("test", "sensor01")
+}
diff --git a/pkg/nsqclient/cmd/main.go b/pkg/nsqclient/cmd/main.go
new file mode 100644
index 0000000..3f6aa46
--- /dev/null
+++ b/pkg/nsqclient/cmd/main.go
@@ -0,0 +1,141 @@
+package main
+
+// #include <stdlib.h>
+// #include <string.h>
+import "C"
+
+import (
+ "basic.com/aps/nsqclient.git"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+//export createProducer
+func createProducer(addr string) unsafe.Pointer {
+ n, _ := nsqclient.NewProducer(addr)
+ return nsqclient.Save(n)
+}
+
+//export destroyProducer
+func destroyProducer(ph unsafe.Pointer) {
+ nsqclient.Unref(ph)
+ nsqclient.DestroyProducerPool()
+}
+
+func pcvt(ph unsafe.Pointer) nsqclient.Producer {
+ return nsqclient.Restore(ph).(nsqclient.Producer)
+}
+
+//export publish
+func publish(ph unsafe.Pointer, topic string, msg []byte) bool {
+ p := pcvt(ph)
+ if err := p.Publish(topic, msg); err != nil {
+ return false
+ }
+ return true
+}
+
+//export multiPublish
+func multiPublish(ph unsafe.Pointer, topic string, msg [][]byte) bool {
+ p := pcvt(ph)
+ if err := p.MultiPublish(topic, msg); err != nil {
+ return false
+ }
+ return true
+}
+
+//export deferredPublish
+func deferredPublish(ph unsafe.Pointer, topic string, ms int, msg []byte) bool {
+ p := pcvt(ph)
+ if err := p.DeferredPublish(topic, time.Duration(ms)*time.Millisecond, msg); err != nil {
+ return false
+ }
+ return true
+}
+
+/////////////////////////////////////////////////////////////
+
+type consumer struct {
+ nsqcon *nsqclient.NsqConsumer
+ lck sync.Mutex
+ msgs [][]byte
+}
+
+//export createConsumer
+func createConsumer(topic, channel string) unsafe.Pointer {
+ if c, err := nsqclient.NewNsqConsumer(nil, topic, channel); err == nil {
+ con := &consumer{
+ nsqcon: c,
+ }
+ return nsqclient.Save(con)
+ }
+ return nil
+}
+
+func ccvt(ch unsafe.Pointer) *consumer {
+ return nsqclient.Restore(ch).(*consumer)
+}
+
+//export destroyConsumer
+func destroyConsumer(ch unsafe.Pointer) {
+ nsqclient.DestroyNsqConsumer(ccvt(ch).nsqcon)
+ nsqclient.Unref(ch)
+}
+
+//export Run
+func Run(ch unsafe.Pointer, addr string) {
+ c := ccvt(ch)
+ c.nsqcon.AddHandler(func(msg []byte) error {
+ c.lck.Lock()
+ defer c.lck.Unlock()
+ c.msgs = append(c.msgs, msg)
+ return nil
+ })
+
+ c.nsqcon.Run(addr, 1)
+}
+
+//export RunLookupd
+func RunLookupd(ch unsafe.Pointer, lookAddr string) {
+ c := ccvt(ch)
+ c.nsqcon.AddHandler(func(msg []byte) error {
+ c.lck.Lock()
+ defer c.lck.Unlock()
+ c.msgs = append(c.msgs, msg)
+ return nil
+ })
+
+ c.nsqcon.RunLookupd(lookAddr, 1)
+}
+
+//export getMessage
+func getMessage(ch unsafe.Pointer, data *unsafe.Pointer, size *C.size_t) bool {
+ c := ccvt(ch)
+ c.lck.Lock()
+ defer c.lck.Unlock()
+ if len(c.msgs) == 0 {
+ return false
+ }
+
+ msg := c.msgs[0]
+ c.msgs = c.msgs[1:]
+
+ *size = C.size_t(len(msg))
+ ptr := C.malloc(*size)
+ C.memcpy(ptr, unsafe.Pointer(&msg[0]), *size)
+ *data = ptr
+
+ return true
+}
+
+//export relMessage
+func relMessage(msg unsafe.Pointer) {
+ if msg != nil {
+ C.free(msg)
+ }
+}
+
+func main() {
+	// A main function is required for -buildmode=c-shared builds; it is
+	// never executed when the shared library is loaded from C.
+}
diff --git a/pkg/nsqclient/cmd/make.sh b/pkg/nsqclient/cmd/make.sh
new file mode 100644
index 0000000..2aa9d22
--- /dev/null
+++ b/pkg/nsqclient/cmd/make.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+if [ ! -d "clib" ]; then
+ mkdir clib
+fi
+go tool cgo -exportheader clib/libnsqclient.h main.go &&
+go build -buildmode=c-shared -o ./clib/libnsqclient.so &&
+rm -fr _obj &&
+go build -o gonsqcli &&
+g++ -std=c++11 -g -O0 -o cnsqcli TST/ctest/ctest.cpp -I. -Lclib/ -lnsqclient -ldl -pthread
diff --git a/pkg/nsqclient/conn.go b/pkg/nsqclient/conn.go
new file mode 100644
index 0000000..5c680b5
--- /dev/null
+++ b/pkg/nsqclient/conn.go
@@ -0,0 +1,45 @@
+package nsqclient
+
+import (
+ "sync"
+
+ nsq "github.com/nsqio/go-nsq"
+)
+
+// PoolConn is a wrapper around *nsq.Producer that modifies the behavior of
+// its Close() method.
+type PoolConn struct {
+ *nsq.Producer
+ mu sync.RWMutex
+ c *channelPool
+ unusable bool
+}
+
+// Close puts the connection back into the pool instead of closing it.
+func (p *PoolConn) Close() error {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+	if p.unusable {
+		if p.Producer != nil {
+			p.Producer.Stop()
+		}
+		return nil
+	}
+ return p.c.put(p.Producer)
+}
+
+// MarkUnusable marks the connection not usable any more, to let the pool close it instead of returning it to pool.
+func (p *PoolConn) MarkUnusable() {
+ p.mu.Lock()
+ p.unusable = true
+ p.mu.Unlock()
+}
+
+// wrapConn wraps an *nsq.Producer in a PoolConn bound to this pool.
+func (c *channelPool) wrapConn(conn *nsq.Producer) *PoolConn {
+ p := &PoolConn{c: c}
+ p.Producer = conn
+ return p
+}
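The split between Close and MarkUnusable is the core of the pool contract: Close recycles the producer, MarkUnusable makes Close stop it instead. A sketch under the assumption that a failed Publish means the connection is broken (the address is illustrative):

```go
package main

import (
	"log"

	"apsClient/pkg/nsqclient"
	nsq "github.com/nsqio/go-nsq"
)

// publishOnce borrows a producer from the pool for a single publish.
func publishOnce(p nsqclient.Pool, topic string, body []byte) error {
	conn, err := p.Get()
	if err != nil {
		return err
	}
	defer conn.Close() // back to the pool, or stopped if marked unusable

	if err := conn.Publish(topic, body); err != nil {
		conn.MarkUnusable() // broken producer: don't put it back
		return err
	}
	return nil
}

func main() {
	factory := func() (*nsq.Producer, error) {
		return nsq.NewProducer("127.0.0.1:4150", nsq.NewConfig())
	}
	pool, err := nsqclient.NewChannelPool(1, 5, factory)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	if err := publishOnce(pool, "test", []byte("hello")); err != nil {
		log.Printf("publish: %v", err)
	}
}
```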
diff --git a/pkg/nsqclient/consumer.go b/pkg/nsqclient/consumer.go
new file mode 100644
index 0000000..a0df0b0
--- /dev/null
+++ b/pkg/nsqclient/consumer.go
@@ -0,0 +1,99 @@
+package nsqclient
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ nsq "github.com/nsqio/go-nsq"
+)
+
+type NsqConsumer struct {
+ consumer *nsq.Consumer
+ // handler nsq.Handler
+ handler func([]byte) error
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ topic string
+ channel string
+}
+
+func NewNsqConsumer(ctx context.Context, topic, channel string, options ...func(*nsq.Config)) (*NsqConsumer, error) {
+ conf := nsq.NewConfig()
+ conf.MaxAttempts = 0
+	conf.MsgTimeout = 10 * time.Minute        // a message may be in flight for at most ten minutes before it is requeued
+	conf.LookupdPollInterval = 3 * time.Second // poll lookupd (and reconnect) every 3 seconds
+ for _, option := range options {
+ option(conf)
+ }
+
+ consumer, err := nsq.NewConsumer(topic, channel, conf)
+ if err != nil {
+ return nil, err
+ }
+ return &NsqConsumer{
+ consumer: consumer,
+ ctx: ctx,
+ topic: topic,
+ channel: channel,
+ }, nil
+}
+
+func DestroyNsqConsumer(c *NsqConsumer) {
+ if c != nil {
+ if c.ctxCancel != nil {
+ c.ctxCancel()
+ }
+ }
+}
+
+// func (n *NsqConsumer) AddHandler(handler nsq.Handler) {
+// n.handler = handler
+// }
+
+func (n *NsqConsumer) AddHandler(handler func([]byte) error) {
+ n.handler = handler
+}
+
+func (n *NsqConsumer) Run(qaddr string, concurrency int) error {
+ return n.RunDistributed([]string{qaddr}, nil, concurrency)
+}
+
+func (n *NsqConsumer) RunLookupd(lookupAddr string, concurrency int) error {
+ return n.RunDistributed(nil, []string{lookupAddr}, concurrency)
+}
+
+func (n *NsqConsumer) RunDistributed(qAddr, lAddr []string, concurrency int) error {
+ n.consumer.ChangeMaxInFlight(concurrency)
+ // n.consumer.AddConcurrentHandlers(n.handler, concurrency)
+ n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(msg *nsq.Message) error {
+ return n.handler(msg.Body)
+ // return nil
+ }), concurrency)
+
+ var err error
+ if len(qAddr) > 0 {
+ err = n.consumer.ConnectToNSQDs(qAddr)
+ } else if len(lAddr) > 0 {
+ err = n.consumer.ConnectToNSQLookupds(lAddr)
+ } else {
+		err = fmt.Errorf("addr must not be empty")
+ }
+ if err != nil {
+ return err
+ }
+
+ if n.ctx == nil {
+ n.ctx, n.ctxCancel = context.WithCancel(context.Background())
+ }
+
+	<-n.ctx.Done()
+	fmt.Printf("[%s] %s,%s\n", "stop consumer", n.topic, n.channel)
+	n.consumer.Stop()
+	fmt.Printf("[%s] %s,%s\n", "stop consumer success", n.topic, n.channel)
+	return nil
+}
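Run and RunLookupd block until the consumer's context is cancelled, so the usual pattern is a goroutine plus a cancel on shutdown. A sketch with illustrative addresses:

```go
package main

import (
	"context"
	"log"
	"time"

	"apsClient/pkg/nsqclient"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c, err := nsqclient.NewNsqConsumer(ctx, "test", "sensor01")
	if err != nil {
		log.Fatal(err)
	}
	c.AddHandler(func(data []byte) error {
		log.Printf("recv %d bytes", len(data))
		return nil
	})
	go func() {
		if err := c.Run("127.0.0.1:4150", 1); err != nil {
			log.Printf("consumer stopped: %v", err)
		}
	}()

	time.Sleep(5 * time.Second)
	cancel() // unblocks Run, which calls consumer.Stop()
}
```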
diff --git a/pkg/nsqclient/pointer.go b/pkg/nsqclient/pointer.go
new file mode 100644
index 0000000..1cba795
--- /dev/null
+++ b/pkg/nsqclient/pointer.go
@@ -0,0 +1,57 @@
+package nsqclient
+
+// #include <stdlib.h>
+import "C"
+import (
+ "sync"
+ "unsafe"
+)
+
+var (
+ mutex sync.RWMutex
+ store = map[unsafe.Pointer]interface{}{}
+)
+
+func Save(v interface{}) unsafe.Pointer {
+ if v == nil {
+ return nil
+ }
+
+	// Generate a real but fake C pointer.
+	// This pointer will not store any data; it is only used for indexing.
+	// Since Go doesn't allow casting a dangling pointer to unsafe.Pointer, we really do allocate one byte.
+	// The indexing is needed because Go doesn't allow C code to store pointers to Go data.
+ var ptr unsafe.Pointer = C.malloc(C.size_t(1))
+ if ptr == nil {
+ panic("can't allocate 'cgo-pointer hack index pointer': ptr == nil")
+ }
+
+ mutex.Lock()
+ store[ptr] = v
+ mutex.Unlock()
+
+ return ptr
+}
+
+func Restore(ptr unsafe.Pointer) (v interface{}) {
+ if ptr == nil {
+ return nil
+ }
+
+ mutex.RLock()
+ v = store[ptr]
+ mutex.RUnlock()
+ return
+}
+
+func Unref(ptr unsafe.Pointer) {
+ if ptr == nil {
+ return
+ }
+
+ mutex.Lock()
+ delete(store, ptr)
+ mutex.Unlock()
+
+ C.free(ptr)
+}
diff --git a/pkg/nsqclient/pool.go b/pkg/nsqclient/pool.go
new file mode 100644
index 0000000..b773490
--- /dev/null
+++ b/pkg/nsqclient/pool.go
@@ -0,0 +1,25 @@
+// Package nsqclient implements a pool of nsq.Producer connections to manage and reuse them.
+package nsqclient
+
+import "errors"
+
+var (
+ // ErrClosed is the error resulting if the pool is closed via pool.Close().
+ ErrClosed = errors.New("pool is closed")
+)
+
+// Pool interface describes a pool implementation. A pool should have maximum
+// capacity. An ideal pool is threadsafe and easy to use.
+type Pool interface {
+	// Get returns a new connection from the pool. Closing the connection puts
+	// it back into the Pool. Closing it when the pool is destroyed or full
+	// will be counted as an error.
+ Get() (*PoolConn, error)
+
+ // Close closes the pool and all its connections. After Close() the pool is
+ // no longer usable.
+ Close()
+
+ // Len returns the current number of connections of the pool.
+ Len() int
+}
diff --git a/pkg/nsqclient/producer.go b/pkg/nsqclient/producer.go
new file mode 100644
index 0000000..717c7a1
--- /dev/null
+++ b/pkg/nsqclient/producer.go
@@ -0,0 +1,139 @@
+package nsqclient
+
+import (
+ "fmt"
+ "time"
+
+ nsq "github.com/nsqio/go-nsq"
+)
+
+type Producer interface {
+ Publish(topic string, body []byte) error
+ MultiPublish(topic string, body [][]byte) error
+ DeferredPublish(topic string, delay time.Duration, body []byte) error
+}
+
+var _ Producer = (*producer)(nil)
+
+type producer struct {
+ pool Pool
+}
+
+var (
+	// producer pools indexed by name (and by address, see CreateProducerPool)
+	nsqList = make(map[string]Pool)
+)
+
+type Config struct {
+ Addr string `toml:"addr" json:"addr"`
+ InitSize int `toml:"init_size" json:"init_size"`
+ MaxSize int `toml:"max_size" json:"max_size"`
+}
+
+func CreateProducerPool(configs map[string]Config) {
+ for name, conf := range configs {
+ n, err := newProducerPool(conf.Addr, conf.InitSize, conf.MaxSize)
+ if err == nil {
+ nsqList[name] = n
+			// also register under the ip:port address as a lookup key
+ nsqList[conf.Addr] = n
+ }
+ }
+}
+
+func DestroyProducerPool() {
+ for _, p := range nsqList {
+ p.Close()
+ }
+}
+
+func GetProducer(key ...string) (*producer, error) {
+ k := "default"
+ if len(key) > 0 {
+ k = key[0]
+ }
+ if n, ok := nsqList[k]; ok {
+ return &producer{n}, nil
+ }
+ return nil, fmt.Errorf("GetProducer can't get producer")
+}
+
+// newProducer creates an nsq producer
+func newProducer(addr string, options ...func(*nsq.Config)) (*nsq.Producer, error) {
+ cfg := nsq.NewConfig()
+ for _, option := range options {
+ option(cfg)
+ }
+
+ producer, err := nsq.NewProducer(addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ // producer.SetLogger(log.New(os.Stderr, "", log.Flags()), nsq.LogLevelError)
+ return producer, nil
+}
+
+// newProducerPool creates an nsq producer pool
+func newProducerPool(addr string, initSize, maxSize int, options ...func(*nsq.Config)) (Pool, error) {
+ factory := func() (*nsq.Producer, error) {
+		// TODO: ping here to verify the connection is healthy; otherwise the conn should not be created
+ return newProducer(addr, options...)
+ }
+ nsqPool, err := NewChannelPool(initSize, maxSize, factory)
+ if err != nil {
+ return nil, err
+ }
+ return nsqPool, nil
+}
+
+func NewProducer(addr string) (*producer, error) {
+ CreateProducerPool(map[string]Config{"default": {addr, 1, 1}})
+ return GetProducer()
+}
+
+func retry(num int, fn func() error) error {
+ var err error
+ for i := 0; i < num; i++ {
+ err = fn()
+ if err == nil {
+ break
+ }
+ }
+ return err
+}
+
+func (p *producer) Publish(topic string, body []byte) error {
+	conn, err := p.pool.Get()
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	return retry(2, func() error {
+		return conn.Publish(topic, body)
+	})
+}
+
+func (p *producer) MultiPublish(topic string, body [][]byte) error {
+	conn, err := p.pool.Get()
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	return retry(2, func() error {
+		return conn.MultiPublish(topic, body)
+	})
+}
+
+func (p *producer) DeferredPublish(topic string, delay time.Duration, body []byte) error {
+	conn, err := p.pool.Get()
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	return retry(2, func() error {
+		return conn.DeferredPublish(topic, delay, body)
+	})
+}
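Beyond the single "default" pool that NewProducer sets up, CreateProducerPool supports several named pools, each also addressable by its ip:port. A sketch with invented addresses and sizes:

```go
package main

import (
	"log"

	"apsClient/pkg/nsqclient"
)

func main() {
	nsqclient.CreateProducerPool(map[string]nsqclient.Config{
		"default": {Addr: "127.0.0.1:4150", InitSize: 2, MaxSize: 10},
		"backup":  {Addr: "127.0.0.1:4152", InitSize: 1, MaxSize: 5},
	})
	defer nsqclient.DestroyProducerPool()

	p, err := nsqclient.GetProducer("backup") // or by address: GetProducer("127.0.0.1:4152")
	if err != nil {
		log.Fatal(err)
	}
	if err := p.Publish("test", []byte("hello")); err != nil {
		log.Printf("publish: %v", err)
	}
}
```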
diff --git a/pkg/sqlitex/sqlitex.go b/pkg/sqlitex/sqlitex.go
new file mode 100644
index 0000000..2c221a5
--- /dev/null
+++ b/pkg/sqlitex/sqlitex.go
@@ -0,0 +1,57 @@
+package sqlitex
+
+import (
+ "go.uber.org/zap"
+ "gorm.io/driver/sqlite"
+ "gorm.io/gorm"
+ "gorm.io/gorm/logger"
+ "gorm.io/gorm/schema"
+ "moul.io/zapgorm2"
+ "time"
+)
+
+type Conf struct {
+ LogMode bool
+ MaxIdleCon int64
+ MaxOpenCon int64
+ ConnMaxLifeTimeSecond int64
+ ConnMaxIdleTimeSecond int64
+ Dsn string
+ Host string
+}
+
+var openDb *gorm.DB
+
+func Init(conf *Conf, log *zap.Logger) error {
+ gConfig := &gorm.Config{
+ PrepareStmt: true,
+ NamingStrategy: schema.NamingStrategy{
+ TablePrefix: "",
+ SingularTable: true,
+ },
+ DisableForeignKeyConstraintWhenMigrating: true,
+ }
+ dbLogger := zapgorm2.New(log).LogMode(logger.Info)
+ if !conf.LogMode {
+ dbLogger = dbLogger.LogMode(logger.Silent)
+ }
+ gConfig.Logger = dbLogger
+ db, err := gorm.Open(sqlite.Open(conf.Dsn), gConfig)
+ if err != nil {
+ return err
+ }
+ sqlDb, err := db.DB()
+ if err != nil {
+ return err
+ }
+ sqlDb.SetMaxIdleConns(int(conf.MaxIdleCon))
+ sqlDb.SetMaxOpenConns(int(conf.MaxOpenCon))
+ sqlDb.SetConnMaxLifetime(time.Duration(conf.ConnMaxLifeTimeSecond) * time.Second)
+ sqlDb.SetConnMaxIdleTime(time.Duration(conf.ConnMaxIdleTimeSecond) * time.Second)
+ openDb = db
+ return nil
+}
+
+func GetDB() *gorm.DB {
+ return openDb
+}
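A sketch of wiring the sqlite layer at startup; the values mirror the sqlite block added to conf/apsClient.json, and zap.NewExample() stands in for the application's real logger:

```go
package main

import (
	"log"

	"apsClient/pkg/sqlitex"
	"go.uber.org/zap"
)

func main() {
	err := sqlitex.Init(&sqlitex.Conf{
		Dsn:                   "aps.db",
		LogMode:               true,
		MaxIdleCon:            50,
		MaxOpenCon:            200,
		ConnMaxLifeTimeSecond: 120,
		ConnMaxIdleTimeSecond: 3600,
	}, zap.NewExample())
	if err != nil {
		log.Fatalf("sqlite init: %v", err)
	}
	db := sqlitex.GetDB() // hand to the gorm-based model layer
	_ = db
}
```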
diff --git a/pkg/structx/structx.go b/pkg/structx/structx.go
new file mode 100644
index 0000000..e038656
--- /dev/null
+++ b/pkg/structx/structx.go
@@ -0,0 +1,16 @@
+package structx
+
+import "encoding/json"
+
+func AssignTo(from interface{}, to interface{}) error {
+ data, err := json.Marshal(from)
+ if err != nil {
+ return err
+ }
+
+ err = json.Unmarshal(data, to)
+ if err != nil {
+ return err
+ }
+ return nil
+}
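AssignTo copies between arbitrary struct shapes via a JSON round trip, matching fields by their json tags; fields with no counterpart on the target are silently dropped. A small sketch with hypothetical types:

```go
package main

import (
	"fmt"

	"apsClient/pkg/structx"
)

type src struct {
	OrderId string `json:"orderId"`
	Amount  string `json:"amount"`
}

type dst struct {
	OrderId string `json:"orderId"`
}

func main() {
	var d dst
	if err := structx.AssignTo(src{OrderId: "SO-1", Amount: "10"}, &d); err != nil {
		fmt.Println("assign:", err)
		return
	}
	fmt.Println(d.OrderId) // SO-1; Amount was dropped
}
```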
diff --git a/router/index.go b/router/index.go
index bf849d2..3e5f9c7 100644
--- a/router/index.go
+++ b/router/index.go
@@ -39,5 +39,11 @@
	noticeGroup.POST("task/start", noticeApi.TaskStart) // task start notification
}
+	taskApi := new(v1.TaskApi)
+	taskGroup := v1Group.Group("task")
+	{
+		taskGroup.GET("list", taskApi.TaskList) // task list
+	}
+
return Router
}
diff --git a/service/task.go b/service/task.go
new file mode 100644
index 0000000..bf65ebf
--- /dev/null
+++ b/service/task.go
@@ -0,0 +1,27 @@
+package service
+
+import (
+ "apsClient/model"
+ "apsClient/pkg/ecode"
+)
+
+type TaskService struct {
+}
+
+func NewTaskService() *TaskService {
+ return &TaskService{}
+}
+
+func (slf TaskService) GetTaskList(page, pageSize int) (taskList []*model.ScheduleTask, total int64, code int) {
+	search := model.NewScheduleTaskSearch(nil)
+	search.SetPage(page, pageSize).
+		SetPreload(true)
+	//SetEndTime(time.Now().Unix())
+	var err error
+	taskList, total, err = search.Find()
+	if err != nil {
+		return taskList, total, ecode.UnknownErr
+	}
+	return taskList, total, ecode.OK
+}
--
Gitblit v1.8.0