diff --git a/App.go b/App.go index 4a8a8ca11d..531d1ef46a 100644 --- a/App.go +++ b/App.go @@ -35,6 +35,7 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/casbin/casbin" + casbinv2 "github.com/casbin/casbin/v2" authMiddleware "github.com/devtron-labs/authenticator/middleware" "github.com/devtron-labs/devtron/api/router" "github.com/devtron-labs/devtron/api/sse" @@ -50,6 +51,7 @@ type App struct { Logger *zap.SugaredLogger SSE *sse.SSE Enforcer *casbin.SyncedEnforcer + EnforcerV2 *casbinv2.SyncedEnforcer server *http.Server db *pg.DB posthogClient *telemetry.PosthogClient @@ -74,6 +76,7 @@ func NewApp(router *router.MuxRouter, centralEventProcessor *eventProcessor.CentralEventProcessor, pubSubClient *pubsub.PubSubClientServiceImpl, workflowEventProcessorImpl *in.WorkflowEventProcessorImpl, + enforcerV2 *casbinv2.SyncedEnforcer, ) *App { //check argo connection //todo - check argo-cd version on acd integration installation @@ -82,6 +85,7 @@ func NewApp(router *router.MuxRouter, Logger: Logger, SSE: sse, Enforcer: enforcer, + EnforcerV2: enforcerV2, db: db, serveTls: false, sessionManager2: sessionManager2, diff --git a/api/auth/user/wire_user.go b/api/auth/user/wire_user.go index 77e414b683..6c421698d5 100644 --- a/api/auth/user/wire_user.go +++ b/api/auth/user/wire_user.go @@ -56,7 +56,7 @@ var UserWireSet = wire.NewSet( casbin.NewEnforcerImpl, wire.Bind(new(casbin.Enforcer), new(*casbin.EnforcerImpl)), - casbin.Create, + casbin.Create, casbin.CreateV2, user2.NewUserCommonServiceImpl, wire.Bind(new(user2.UserCommonService), new(*user2.UserCommonServiceImpl)), diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index 3e303d78ad..0994cbd0fe 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -145,7 +145,11 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - enforcerImpl, err := casbin.NewEnforcerImpl(syncedEnforcer, sessionManager, sugaredLogger) + casbinSyncedEnforcer, err := casbin.CreateV2() + if err != nil { + return nil, err + } + enforcerImpl, err := casbin.NewEnforcerImpl(syncedEnforcer, casbinSyncedEnforcer, sessionManager, sugaredLogger) if err != nil { return nil, err } diff --git a/env_gen.md b/env_gen.md index fc828f3832..1ed120c5f6 100644 --- a/env_gen.md +++ b/env_gen.md @@ -246,6 +246,7 @@ | USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW | true | | | USE_BUILDX | false | | | USE_CUSTOM_HTTP_TRANSPORT | false | | + | USE_CASBIN_V2 | false | | | USE_EXTERNAL_NODE | false | | | USE_GIT_CLI | false | | | USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD | false | | diff --git a/go.mod b/go.mod index 78d3ba79bb..91835940e1 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/caarlos0/env v3.5.0+incompatible github.com/caarlos0/env/v6 v6.7.2 github.com/casbin/casbin v1.9.1 + github.com/casbin/casbin/v2 v2.97.0 github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007 github.com/coreos/go-oidc v2.2.1+incompatible github.com/davecgh/go-spew v1.1.1 @@ -128,6 +129,8 @@ require ( github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect github.com/bombsimon/logrusr/v2 v2.0.1 // indirect github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 // indirect + github.com/casbin/govaluate v1.1.0 // indirect + github.com/casbin/xorm-adapter/v2 v2.5.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect @@ -158,6 +161,7 @@ require ( github.com/go-xorm/xorm v0.7.9 // indirect 
github.com/gobwas/glob v0.2.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/go-github/v53 v53.0.0 // indirect @@ -234,6 +238,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stretchr/objx v0.5.0 // indirect + github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect @@ -291,8 +296,9 @@ require ( sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect upper.io/db.v3 v3.8.0+incompatible // indirect - xorm.io/builder v0.3.6 // indirect + xorm.io/builder v0.3.7 // indirect xorm.io/core v0.7.2 // indirect + xorm.io/xorm v1.0.3 // indirect ) replace ( diff --git a/go.sum b/go.sum index f7f393930c..d6f609c200 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,7 @@ cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/o cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.12.0 h1:7bFXA1QB+lOK2/ASWHhp6/vnxjaeeZq6t8w1Jyp0Iaw= @@ -63,6 +64,7 @@ github.com/Pallinder/go-randomdata v1.2.0/go.mod h1:yHmJgulpD2Nfrm0cR9tI/+oAgRqC github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -75,6 +77,7 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZp github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.30.3 h1:hrqDB4cHFSHQf4gO3xu6YKQg8PqJpNjLYsQAFYHstqw= github.com/alicebob/miniredis/v2 v2.30.3/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -135,8 +138,15 @@ 
github.com/caarlos0/env/v6 v6.7.2 h1:Jiy2dBHvNgCfNGMP0hOZW6jHUbiENvP+VWDtLz4n1Kg github.com/caarlos0/env/v6 v6.7.2/go.mod h1:FE0jGiAnQqtv2TenJ4KTa8+/T2Ss8kdS5s1VEjasoN0= github.com/casbin/casbin v1.9.1 h1:ucjbS5zTrmSLtH4XogqOG920Poe6QatdXtz1FEbApeM= github.com/casbin/casbin v1.9.1/go.mod h1:z8uPsfBJGUsnkagrt3G8QvjgTKFMBJ32UP8HpZllfog= +github.com/casbin/casbin/v2 v2.28.3/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/casbin/casbin/v2 v2.97.0 h1:FFHIzY+6fLIcoAB/DKcG5xvscUo9XqRpBniRYhlPWkg= +github.com/casbin/casbin/v2 v2.97.0/go.mod h1:jX8uoN4veP85O/n2674r2qtfSXI6myvxW85f6TH50fw= +github.com/casbin/govaluate v1.1.0 h1:6xdCWIpE9CwHdZhlVQW+froUrCsjb6/ZYNcXODfLT+E= +github.com/casbin/govaluate v1.1.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007 h1:KEBrEhQjSCzUt5bQKxX8ZbS3S46sRnzOmwemTOu+LLQ= github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007/go.mod h1:6sy40UQdWR0blO1DJdEzbcu6rcEW89odCMcEdoB1qdM= +github.com/casbin/xorm-adapter/v2 v2.5.1 h1:BkpIxRHKa0s3bSMx173PpuU7oTs+Zw7XmD0BIta0HGM= +github.com/casbin/xorm-adapter/v2 v2.5.1/go.mod h1:AeH4dBKHC9/zYxzdPVHhPDzF8LYLqjDdb767CWJoV54= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -183,6 +193,7 @@ github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsP github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devtron-labs/authenticator v0.4.35-0.20240607135426-c86e868ecee1 h1:qdkpTAo2Kr0ZicZIVXfNwsGSshpc9OB9j9RzmKYdIwY= github.com/devtron-labs/authenticator v0.4.35-0.20240607135426-c86e868ecee1/go.mod h1:IkKPPEfgLCMR29he5yv2OCC6iM2R7K5/0AA3k8b9XNc= github.com/devtron-labs/common-lib v0.0.21-0.20240628105542-603b4f777e00 h1:xSZulEz0PaTA7tL4Es/uNFUmgjD6oAv8gxJV49GPWHk= @@ -299,6 +310,7 @@ github.com/go-redis/cache/v9 v9.0.0/go.mod h1:cMwi1N8ASBOufbIvk7cdXe2PbPjK/WMRL9 github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -319,6 +331,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= 
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= @@ -328,6 +341,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -349,6 +363,8 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -553,6 +569,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -577,6 +595,7 @@ github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBq github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -774,6 +793,8 @@ 
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -910,6 +931,7 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -926,7 +948,9 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1076,6 +1100,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1280,6 +1305,10 @@ upper.io/db.v3 v3.8.0+incompatible 
h1:XNeEO2vQRVqq70M98ghzq6M30F5Bzo+99ess5v+eVY upper.io/db.v3 v3.8.0+incompatible/go.mod h1:FgTdD24eBjJAbPKsQSiHUNgXjOR4Lub3u1UMHSIh82Y= xorm.io/builder v0.3.6 h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8= xorm.io/builder v0.3.6/go.mod h1:LEFAPISnRzG+zxaxj2vPicRwz67BdhFreKg8yv8/TgU= +xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= +xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= xorm.io/core v0.7.2-0.20190928055935-90aeac8d08eb/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= xorm.io/core v0.7.2 h1:mEO22A2Z7a3fPaZMk6gKL/jMD80iiyNwRrX5HOv3XLw= xorm.io/core v0.7.2/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= +xorm.io/xorm v1.0.3 h1:3dALAohvINu2mfEix5a5x5ZmSVGSljinoSGgvGbaZp0= +xorm.io/xorm v1.0.3/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4= diff --git a/pkg/auth/authorisation/casbin/Adapter.go b/pkg/auth/authorisation/casbin/Adapter.go index 7b3a76b46d..4a99f1499c 100644 --- a/pkg/auth/authorisation/casbin/Adapter.go +++ b/pkg/auth/authorisation/casbin/Adapter.go @@ -19,19 +19,31 @@ package casbin import ( "fmt" "log" + "os" "strings" xormadapter "github.com/casbin/xorm-adapter" + xormadapter2 "github.com/casbin/xorm-adapter/v2" "github.com/casbin/casbin" + casbinv2 "github.com/casbin/casbin/v2" "github.com/devtron-labs/devtron/pkg/sql" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const CasbinDefaultDatabase = "casbin" +type Version string + +const ( + CasbinV1 Version = "V1" + CasbinV2 Version = "V2" +) + var e *casbin.SyncedEnforcer +var e2 *casbinv2.SyncedEnforcer var enforcerImplRef *EnforcerImpl +var casbinVersion Version type Subject string type Resource string @@ -47,7 +59,24 @@ type Policy struct { Obj Object `json:"obj"` } +func isV2() bool { + return casbinVersion == CasbinV2 +} + +func setCasbinVersion() { + version := os.Getenv("USE_CASBIN_V2") + if version == "true" { + casbinVersion = CasbinV2 + return + } + casbinVersion = CasbinV1 +} + func Create() (*casbin.SyncedEnforcer, error) { + setCasbinVersion() + if isV2() { + return nil, nil + } metav1.Now() config, err := sql.GetConfig() //FIXME: use this from wire if err != nil { @@ -81,6 +110,47 @@ func Create() (*casbin.SyncedEnforcer, error) { return e, nil } +func CreateV2() (*casbinv2.SyncedEnforcer, error) { + setCasbinVersion() + if !isV2() { + return nil, nil + } + + metav1.Now() + config, err := sql.GetConfig() + if err != nil { + log.Println(err) + return nil, err + } + dbSpecified := true + if config.CasbinDatabase == CasbinDefaultDatabase { + dbSpecified = false + } + dataSource := fmt.Sprintf("dbname=%s user=%s password=%s host=%s port=%s sslmode=disable", config.CasbinDatabase, config.User, config.Password, config.Addr, config.Port) + a, err := xormadapter2.NewAdapter("postgres", dataSource, dbSpecified) // Your driver and data source. 
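+	// dbSpecified=false lets xorm-adapter create and use its default "casbin" database on its own; with a custom CasbinDatabase configured, dbSpecified=true and that database must already exist (per xorm-adapter's documented NewAdapter behaviour).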
+ if err != nil { + log.Println(err) + return nil, err + } + //Adapter + + auth, err1 := casbinv2.NewSyncedEnforcer("./auth_model.conf", a) + if err1 != nil { + log.Println(err1) + return nil, err1 + } + e2 = auth + err = e2.LoadPolicy() + if err != nil { + log.Println(err) + return nil, err + } + log.Println("v2 casbin Policies Loaded Successfully") + //adding our key matching func - MatchKeyFunc, to enforcer + e2.AddFunction("matchKeyByPart", MatchKeyByPartFunc) + return e2, nil +} + func setEnforcerImpl(ref *EnforcerImpl) { enforcerImplRef = ref } @@ -89,6 +159,7 @@ func AddPolicy(policies []Policy) []Policy { defer handlePanic() var failed = []Policy{} emailIdList := map[string]struct{}{} + var err error for _, p := range policies { success := false if strings.ToLower(string(p.Type)) == "p" && p.Sub != "" && p.Res != "" && p.Act != "" && p.Obj != "" { @@ -96,11 +167,26 @@ sub := strings.ToLower(string(p.Sub)) res := strings.ToLower(string(p.Res)) act := strings.ToLower(string(p.Act)) obj := strings.ToLower(string(p.Obj)) - success = e.AddPolicy([]string{sub, res, act, obj, "allow"}) + if isV2() { + success, err = e2.AddPolicy([]string{sub, res, act, obj, "allow"}) + if err != nil { + log.Println(err) + } + } else { + success = e.AddPolicy([]string{sub, res, act, obj, "allow"}) + } + } else if strings.ToLower(string(p.Type)) == "g" && p.Sub != "" && p.Obj != "" { sub := strings.ToLower(string(p.Sub)) obj := strings.ToLower(string(p.Obj)) + if isV2() { + success, err = e2.AddGroupingPolicy([]string{sub, obj}) + if err != nil { + log.Println(err) + } + } else { + success = e.AddGroupingPolicy([]string{sub, obj}) + } } if !success { failed = append(failed, p) @@ -122,8 +208,6 @@ func LoadPolicy() { err := enforcerImplRef.ReloadPolicy() if err != nil { fmt.Println("error in reloading policies", err) - } else { - fmt.Println("policy reloaded successfully") } } @@ -131,12 +215,27 @@ func RemovePolicy(policies []Policy) []Policy { defer handlePanic() var failed = []Policy{} emailIdList := map[string]struct{}{} + var err error for _, p := range policies { success := false if strings.ToLower(string(p.Type)) == "p" && p.Sub != "" && p.Res != "" && p.Act != "" && p.Obj != "" { - success = e.RemovePolicy([]string{strings.ToLower(string(p.Sub)), strings.ToLower(string(p.Res)), strings.ToLower(string(p.Act)), strings.ToLower(string(p.Obj))}) + if isV2() { + success, err = e2.RemovePolicy([]string{strings.ToLower(string(p.Sub)), strings.ToLower(string(p.Res)), strings.ToLower(string(p.Act)), strings.ToLower(string(p.Obj))}) + if err != nil { + log.Println(err) + } + } else { + success = e.RemovePolicy([]string{strings.ToLower(string(p.Sub)), strings.ToLower(string(p.Res)), strings.ToLower(string(p.Act)), strings.ToLower(string(p.Obj))}) + } } else if strings.ToLower(string(p.Type)) == "g" && p.Sub != "" && p.Obj != "" { - success = e.RemoveGroupingPolicy([]string{strings.ToLower(string(p.Sub)), strings.ToLower(string(p.Obj))}) + if isV2() { + success, err = e2.RemoveGroupingPolicy([]string{strings.ToLower(string(p.Sub)), strings.ToLower(string(p.Obj))}) + if err != nil { + log.Println(err) + } + } else { + success = e.RemoveGroupingPolicy([]string{strings.ToLower(string(p.Sub)), strings.ToLower(string(p.Obj))}) + } } if !success { failed = append(failed, p) @@ -154,30 +253,61 @@ } func GetAllSubjects() []string { + if isV2() { + subjects, err := e2.GetAllSubjects() + if err != nil { +
log.Println(err) + } + return subjects + } return e.GetAllSubjects() } func DeleteRoleForUser(user string, role string) bool { user = strings.ToLower(user) role = strings.ToLower(role) - response := e.DeleteRoleForUser(user, role) + var response bool + var err error + if isV2() { + response, err = e2.DeleteRoleForUser(user, role) + if err != nil { + log.Println(err) + } + } else { + response = e.DeleteRoleForUser(user, role) + } enforcerImplRef.InvalidateCache(user) return response } func GetRolesForUser(user string) ([]string, error) { user = strings.ToLower(user) + if isV2() { + return e2.GetRolesForUser(user) + } return e.GetRolesForUser(user) } func GetUserByRole(role string) ([]string, error) { role = strings.ToLower(role) + if isV2() { + return e2.GetUsersForRole(role) + } return e.GetUsersForRole(role) } func RemovePoliciesByRoles(roles string) bool { roles = strings.ToLower(roles) - policyResponse := e.RemovePolicy([]string{roles}) + var policyResponse bool + var err error + if isV2() { + policyResponse, err = e2.RemovePolicy([]string{roles}) + if err != nil { + log.Println(err) + } + } else { + policyResponse = e.RemovePolicy([]string{roles}) + } enforcerImplRef.InvalidateCompleteCache() return policyResponse } @@ -191,8 +321,16 @@ func RemovePoliciesByAllRoles(roles []string) bool { rolesLower = append(rolesLower, strings.ToLower(role)) } var policyResponse bool + var err error for _, role := range rolesLower { - policyResponse = e.RemovePolicy([]string{role}) + if isV2() { + policyResponse, err = e2.RemovePolicy([]string{role}) + if err != nil { + log.Println(err) + } + } else { + policyResponse = e.RemovePolicy([]string{role}) + } } enforcerImplRef.InvalidateCompleteCache() return policyResponse diff --git a/pkg/auth/authorisation/casbin/rbac.go b/pkg/auth/authorisation/casbin/rbac.go index 6158df91fd..4dcfc5efc9 100644 --- a/pkg/auth/authorisation/casbin/rbac.go +++ b/pkg/auth/authorisation/casbin/rbac.go @@ -19,6 +19,7 @@ package casbin import ( "encoding/json" "fmt" + casbinv2 "github.com/casbin/casbin/v2" "log" "math" "strings" @@ -50,6 +51,7 @@ type Enforcer interface { func NewEnforcerImpl( enforcer *casbin.SyncedEnforcer, + enforcerV2 *casbinv2.SyncedEnforcer, sessionManager *middleware.SessionManager, logger *zap.SugaredLogger) (*EnforcerImpl, error) { lock := make(map[string]*CacheData) @@ -59,7 +61,7 @@ func NewEnforcerImpl( return nil, err } enf := &EnforcerImpl{lockCacheData: lock, enforcerRWLock: &sync.RWMutex{}, batchRequestLock: batchRequestLock, enforcerConfig: enforcerConfig, - Cache: getEnforcerCache(logger, enforcerConfig), SyncedEnforcer: enforcer, logger: logger, SessionManager: sessionManager} + Cache: getEnforcerCache(logger, enforcerConfig), Enforcer: enforcer, EnforcerV2: enforcerV2, logger: logger, SessionManager: sessionManager} setEnforcerImpl(enf) return enf, nil } @@ -74,6 +76,7 @@ type EnforcerConfig struct { CacheEnabled bool `env:"ENFORCER_CACHE" envDefault:"false"` CacheExpirationInSecs int `env:"ENFORCER_CACHE_EXPIRATION_IN_SEC" envDefault:"86400"` EnforcerBatchSize int `env:"ENFORCER_MAX_BATCH_SIZE" envDefault:"1"` + UseCasbinV2 bool `env:"USE_CASBIN_V2" envDefault:"false"` } func getConfig() (*EnforcerConfig, error) { @@ -106,7 +109,8 @@ type EnforcerImpl struct { lockCacheData map[string]*CacheData batchRequestLock map[string]*sync.Mutex *cache.Cache - *casbin.SyncedEnforcer + Enforcer *casbin.SyncedEnforcer + EnforcerV2 *casbinv2.SyncedEnforcer *middleware.SessionManager logger *zap.SugaredLogger enforcerConfig *EnforcerConfig @@ -130,7 
+134,21 @@ func (e *EnforcerImpl) EnforceInBatch(token string, resource string, action stri func (e *EnforcerImpl) ReloadPolicy() error { //e.enforcerRWLock.Lock() //defer e.enforcerRWLock.Unlock() - return e.SyncedEnforcer.LoadPolicy() + var err error + if e.enforcerConfig.UseCasbinV2 { + err = e.EnforcerV2.LoadPolicy() + if err != nil { + return err + } + fmt.Println("V2 policy reloaded successfully") + return nil + } + err = e.Enforcer.LoadPolicy() + if err != nil { + return err + } + fmt.Println("policy reloaded successfully") + return nil } // EnforceErr is a convenience helper to wrap a failed enforcement with a detailed error about the request @@ -413,10 +431,20 @@ func (e *EnforcerImpl) enforceAndUpdateCache(email string, resource string, acti func (e *EnforcerImpl) enforcerEnforce(email string, resource string, action string, resourceItem string) (bool, error) { //e.enforcerRWLock.RLock() //defer e.enforcerRWLock.RUnlock() - response, err := e.SyncedEnforcer.EnforceSafe(email, resource, action, resourceItem) - if err != nil { - e.logger.Errorw("error occurred while enforcing safe", "email", email, - "resource", resource, "action", action, "resourceItem", resourceItem, "reason", err) + var response bool + var err error + if isV2() { + response, err = e.EnforcerV2.Enforce(email, resource, action, resourceItem) + if err != nil { + e.logger.Errorw("error occurred while EnforcerV2 Enforce", "email", email, + "resource", resource, "action", action, "resourceItem", resourceItem, "reason", err) + } + } else { + response, err = e.Enforcer.EnforceSafe(email, resource, action, resourceItem) + if err != nil { + e.logger.Errorw("error occurred while enforcing safe", "email", email, + "resource", resource, "action", action, "resourceItem", resourceItem, "reason", err) + } } return response, err } diff --git a/vendor/github.com/casbin/casbin/v2/.gitignore b/vendor/github.com/casbin/casbin/v2/.gitignore new file mode 100644 index 0000000000..da27805f5b --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.gitignore @@ -0,0 +1,30 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ +*.iml + +# vendor files +vendor diff --git a/vendor/github.com/casbin/casbin/v2/.golangci.yml b/vendor/github.com/casbin/casbin/v2/.golangci.yml new file mode 100644 index 0000000000..b8d3620198 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.golangci.yml @@ -0,0 +1,354 @@ +# Based on https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322 +# This code is licensed under the terms of the MIT license https://opensource.org/license/mit +# Copyright (c) 2021 Marat Reymers + +## Golden config for golangci-lint v1.56.2 +# +# This is the best config for golangci-lint based on my experience and opinion. +# It is very strict, but not extremely strict. +# Feel free to adapt and change it for your needs. + +run: + # Timeout for analysis, e.g. 30s, 5m. + # Default: 1m + timeout: 3m + + +# This file contains only configs which differ from defaults. +# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +linters-settings: + cyclop: + # The maximal code complexity to report. + # Default: 10 + max-complexity: 30 + # The maximal average package complexity. 
+ # If it's higher than 0.0 (float) the check is enabled + # Default: 0.0 + package-average: 10.0 + + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: true + + exhaustive: + # Program elements to check for exhaustiveness. + # Default: [ switch ] + check: + - switch + - map + + exhaustruct: + # List of regular expressions to exclude struct packages and their names from checks. + # Regular expressions must match complete canonical struct package/name/structname. + # Default: [] + exclude: + # std libs + - "^net/http.Client$" + - "^net/http.Cookie$" + - "^net/http.Request$" + - "^net/http.Response$" + - "^net/http.Server$" + - "^net/http.Transport$" + - "^net/url.URL$" + - "^os/exec.Cmd$" + - "^reflect.StructField$" + # public libs + - "^github.com/Shopify/sarama.Config$" + - "^github.com/Shopify/sarama.ProducerMessage$" + - "^github.com/mitchellh/mapstructure.DecoderConfig$" + - "^github.com/prometheus/client_golang/.+Opts$" + - "^github.com/spf13/cobra.Command$" + - "^github.com/spf13/cobra.CompletionOptions$" + - "^github.com/stretchr/testify/mock.Mock$" + - "^github.com/testcontainers/testcontainers-go.+Request$" + - "^github.com/testcontainers/testcontainers-go.FromDockerfile$" + - "^golang.org/x/tools/go/analysis.Analyzer$" + - "^google.golang.org/protobuf/.+Options$" + - "^gopkg.in/yaml.v3.Node$" + + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 100 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 50 + # Ignore comments when counting lines. + # Default false + ignore-comments: true + + gocognit: + # Minimal code complexity to report. + # Default: 30 (but we recommend 10-20) + min-complexity: 20 + + gocritic: + # Settings passed to gocritic. + # The settings key is the name of a supported gocritic checker. + # The list of supported checkers can be find in https://go-critic.github.io/overview. + settings: + captLocal: + # Whether to restrict checker to params only. + # Default: true + paramsOnly: false + underef: + # Whether to skip (*x).method() calls where x is a pointer receiver. + # Default: true + skipRecvDeref: false + + gomnd: + # List of function patterns to exclude from analysis. + # Values always ignored: `time.Date`, + # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`, + # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`. + # Default: [] + ignored-functions: + - flag.Arg + - flag.Duration.* + - flag.Float.* + - flag.Int.* + - flag.Uint.* + - os.Chmod + - os.Mkdir.* + - os.OpenFile + - os.WriteFile + - prometheus.ExponentialBuckets.* + - prometheus.LinearBuckets + + gomodguard: + blocked: + # List of blocked modules. + # Default: [] + modules: + - github.com/golang/protobuf: + recommendations: + - google.golang.org/protobuf + reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" + - github.com/satori/go.uuid: + recommendations: + - github.com/google/uuid + reason: "satori's package is not maintained" + - github.com/gofrs/uuid: + recommendations: + - github.com/gofrs/uuid/v5 + reason: "gofrs' package was not go module before v5" + + govet: + # Enable all analyzers. + # Default: false + enable-all: true + # Disable analyzers by name. + # Run `go tool vet help` to see all analyzers. 
+ # Default: [] + disable: + - fieldalignment # too strict + # Settings per analyzer. + settings: + shadow: + # Whether to be strict about shadowing; can be noisy. + # Default: false + #strict: true + + inamedparam: + # Skips check for interface methods with only a single parameter. + # Default: false + skip-single-param: true + + nakedret: + # Make an issue if func has more lines of code than this setting, and it has naked returns. + # Default: 30 + max-func-lines: 0 + + nolintlint: + # Exclude following linters from requiring an explanation. + # Default: [] + allow-no-explanation: [ funlen, gocognit, lll ] + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. + # Default: false + require-specific: true + + perfsprint: + # Optimizes into strings concatenation. + # Default: true + strconcat: false + + rowserrcheck: + # database/sql is always checked + # Default: [] + packages: + - github.com/jmoiron/sqlx + + tenv: + # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. + # Default: false + all: true + + stylecheck: + # STxxxx checks in https://staticcheck.io/docs/configuration/options/#checks + # Default: ["*"] + checks: ["all", "-ST1003"] + + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + disabled: true + +linters: + disable-all: true + enable: + ## enabled by default + #- errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases + - gosimple # specializes in simplifying a code + - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # detects when assignments to existing variables are not used + - staticcheck # is a go vet on steroids, applying a ton of static analysis checks + - typecheck # like the front-end of a Go compiler, parses and type-checks Go code + - unused # checks for unused constants, variables, functions and types + ## disabled by default + - asasalint # checks for pass []any as any in variadic func(...any) + - asciicheck # checks that your code does not contain non-ASCII identifiers + - bidichk # checks for dangerous unicode character sequences + - bodyclose # checks whether HTTP response body is closed successfully + - cyclop # checks function and package cyclomatic complexity + - dupl # tool for code clone detection + - durationcheck # checks for two durations multiplied together + - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error + #- errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13 + - execinquery # checks query string in Query function which reads your Go src files and warning it finds + - exhaustive # checks exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + #- forbidigo # forbids identifiers + - funlen # tool for detection of long functions + - gocheckcompilerdirectives # validates go compiler directive comments (//go:) + #- gochecknoglobals # checks that no global variables exist + - gochecknoinits # checks that no init functions are present in Go code + - gochecksumtype 
# checks exhaustiveness on Go "sum types" + #- gocognit # computes and checks the cognitive complexity of functions + #- goconst # finds repeated strings that could be replaced by a constant + #- gocritic # provides diagnostics that check for bugs, performance and style issues + - gocyclo # computes and checks the cyclomatic complexity of functions + - godot # checks if comments end in a period + - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt + #- gomnd # detects magic numbers + - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod + - gomodguard # allow and block lists linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations + - goprintffuncname # checks that printf-like functions are named with f at the end + - gosec # inspects source code for security problems + #- lll # reports long lines + - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap) + - makezero # finds slice declarations with non-zero initial length + - mirror # reports wrong mirror patterns of bytes/strings usage + - musttag # enforces field tags in (un)marshaled structs + - nakedret # finds naked returns in functions greater than a specified function length + - nestif # reports deeply nested if statements + - nilerr # finds the code that returns nil even if it checks that the error is not nil + #- nilnil # checks that there is no simultaneous return of nil error and an invalid value + - noctx # finds sending http request without context.Context + - nolintlint # reports ill-formed or insufficient nolint directives + #- nonamedreturns # reports all named returns + - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL + #- perfsprint # checks that fmt.Sprintf can be replaced with a faster alternative + - predeclared # finds code that shadows one of Go's predeclared identifiers + - promlinter # checks Prometheus metrics naming via promlint + - protogetter # reports direct reads from proto message fields when getters should be used + - reassign # checks that package variables are not reassigned + - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint + - rowserrcheck # checks whether Err of rows is checked successfully + - sloglint # ensure consistent code style when using log/slog + - spancheck # checks for mistakes with OpenTelemetry/Census spans + - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed + - stylecheck # is a replacement for golint + - tenv # detects using os.Setenv instead of t.Setenv since Go1.17 + - testableexamples # checks if examples are testable (have an expected output) + - testifylint # checks usage of github.com/stretchr/testify + #- testpackage # makes you use a separate _test package + - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes + - unconvert # removes unnecessary type conversions + #- unparam # reports unused function parameters + - usestdlibvars # detects the possibility to use variables/constants from the Go standard library + - wastedassign # finds wasted assignment statements + - whitespace # detects leading and trailing whitespace + + ## you may want to enable + #- decorder # checks declaration order and count of types, constants, variables and functions + #- exhaustruct # [highly recommend to enable] 
checks if all structure fields are initialized + #- gci # controls golang package import order and makes it always deterministic + #- ginkgolinter # [if you use ginkgo/gomega] enforces standards of using ginkgo and gomega + #- godox # detects FIXME, TODO and other comment keywords + #- goheader # checks is file header matches to pattern + #- inamedparam # [great idea, but too strict, need to ignore a lot of cases by default] reports interfaces with unnamed method parameters + #- interfacebloat # checks the number of methods inside an interface + #- ireturn # accept interfaces, return concrete types + #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated + #- tagalign # checks that struct tags are well aligned + #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope + #- wrapcheck # checks that errors returned from external packages are wrapped + #- zerologlint # detects the wrong usage of zerolog that a user forgets to dispatch zerolog.Event + + ## disabled + #- containedctx # detects struct contained context.Context field + #- contextcheck # [too many false positives] checks the function whether use a non-inherited context + #- depguard # [replaced by gomodguard] checks if package imports are in a list of acceptable packages + #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + #- dupword # [useless without config] checks for duplicate words in the source code + #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted + #- forcetypeassert # [replaced by errcheck] finds forced type assertions + #- goerr113 # [too strict] checks the errors handling expressions + #- gofmt # [replaced by goimports] checks whether code was gofmt-ed + #- gofumpt # [replaced by goimports, gofumports is not available yet] checks whether code was gofumpt-ed + #- gosmopolitan # reports certain i18n/l10n anti-patterns in your Go codebase + #- grouper # analyzes expression groups + #- importas # enforces consistent import aliases + #- maintidx # measures the maintainability index of each function + #- misspell # [useless] finds commonly misspelled English words in comments + #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity + #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test + #- tagliatelle # checks the struct tags + #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers + #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines + + ## deprecated + #- deadcode # [deprecated, replaced by unused] finds unused code + #- exhaustivestruct # [deprecated, replaced by exhaustruct] checks if all struct's fields are initialized + #- golint # [deprecated, replaced by revive] golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes + #- ifshort # [deprecated] checks that your code uses short syntax for if-statements whenever possible + #- interfacer # [deprecated] suggests narrower interface types + #- maligned # [deprecated, replaced by govet fieldalignment] detects Go structs that would take less memory if their fields were sorted + #- nosnakecase # [deprecated, replaced by revive var-naming] detects snake case of variable naming and function name + #- scopelint # [deprecated, replaced by exportloopref] checks for unpinned variables in go programs + #- structcheck # [deprecated, replaced by unused] finds unused struct fields + #- varcheck # [deprecated, replaced by unused] finds unused global variables and constants + + +issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 50 + + exclude-rules: + - source: "(noinspection|TODO)" + linters: [ godot ] + - source: "//noinspection" + linters: [ gocritic ] + - path: "_test\\.go" + linters: + - bodyclose + - dupl + - funlen + - goconst + - gosec + - noctx + - wrapcheck + # TODO: remove after PR is released https://github.com/golangci/golangci-lint/pull/4386 + - text: "fmt.Sprintf can be replaced with string addition" + linters: [ perfsprint ] \ No newline at end of file diff --git a/vendor/github.com/casbin/casbin/v2/.releaserc.json b/vendor/github.com/casbin/casbin/v2/.releaserc.json new file mode 100644 index 0000000000..58cb0bb4ca --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.releaserc.json @@ -0,0 +1,16 @@ +{ + "debug": true, + "branches": [ + "+([0-9])?(.{+([0-9]),x}).x", + "master", + { + "name": "beta", + "prerelease": true + } + ], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/github" + ] +} diff --git a/vendor/github.com/casbin/casbin/v2/.travis.yml b/vendor/github.com/casbin/casbin/v2/.travis.yml new file mode 100644 index 0000000000..cea21652e0 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.travis.yml @@ -0,0 +1,15 @@ +language: go + +sudo: false + +env: + - GO111MODULE=on + +go: + - "1.11.13" + - "1.12" + - "1.13" + - "1.14" + +script: + - make test diff --git a/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..4bab59c93f --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md @@ -0,0 +1,35 @@ +# How to contribute + +The following is a set of guidelines for contributing to casbin and its libraries, which are hosted at [casbin organization at Github](https://github.com/casbin). + +This project adheres to the [Contributor Covenant 1.2.](https://www.contributor-covenant.org/version/1/2/0/code-of-conduct.html) By participating, you are expected to uphold this code. Please report unacceptable behavior to info@casbin.com. + +## Questions + +- We do our best to have an [up-to-date documentation](https://casbin.org/docs/overview) +- [Stack Overflow](https://stackoverflow.com) is the best place to start if you have a question. Please use the [casbin tag](https://stackoverflow.com/tags/casbin/info) we are actively monitoring. We encourage you to use Stack Overflow specially for Modeling Access Control Problems, in order to build a shared knowledge base. +- You can also join our [Discord](https://discord.gg/S5UjpzGZjN). + +## Reporting issues + +Reporting issues are a great way to contribute to the project. 
We are perpetually grateful about a well-written, through bug report. + +Before raising a new issue, check our [issue list](https://github.com/casbin/casbin/issues) to determine if it already contains the problem that you are facing. + +A good bug report shouldn't leave others needing to chase you for more information. Please be as detailed as possible. The following questions might serve as a template for writing a detailed report: + +What were you trying to achieve? +What are the expected results? +What are the received results? +What are the steps to reproduce the issue? +In what environment did you encounter the issue? + +Feature requests can also be submitted as issues. + +## Pull requests + +Good pull requests (e.g. patches, improvements, new features) are a fantastic help. They should remain focused in scope and avoid unrelated commits. + +Please ask first before embarking on any significant pull request (e.g. implementing new features, refactoring code etc.), otherwise you risk spending a lot of time working on something that the maintainers might not want to merge into the project. + +First add an issue to the project to discuss the improvement. Please adhere to the coding conventions used throughout the project. If in doubt, consult the [Effective Go style guide](https://golang.org/doc/effective_go.html). diff --git a/vendor/github.com/casbin/casbin/v2/LICENSE b/vendor/github.com/casbin/casbin/v2/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/casbin/casbin/v2/Makefile b/vendor/github.com/casbin/casbin/v2/Makefile new file mode 100644 index 0000000000..6db2b92071 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/Makefile @@ -0,0 +1,18 @@ +SHELL = /bin/bash +export PATH := $(shell yarn global bin):$(PATH) + +default: lint test + +test: + go test -race -v ./... + +benchmark: + go test -bench=. 
+
+lint:
+	golangci-lint run --verbose
+
+release:
+	yarn global add semantic-release@17.2.4
+	semantic-release
+
diff --git a/vendor/github.com/casbin/casbin/v2/README.md b/vendor/github.com/casbin/casbin/v2/README.md
new file mode 100644
index 0000000000..36549f55f9
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/README.md
@@ -0,0 +1,296 @@
+Casbin
+====
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/casbin)](https://goreportcard.com/report/github.com/casbin/casbin)
+[![Build](https://github.com/casbin/casbin/actions/workflows/default.yml/badge.svg)](https://github.com/casbin/casbin/actions/workflows/default.yml)
+[![Coverage Status](https://coveralls.io/repos/github/casbin/casbin/badge.svg?branch=master)](https://coveralls.io/github/casbin/casbin?branch=master)
+[![Godoc](https://godoc.org/github.com/casbin/casbin?status.svg)](https://pkg.go.dev/github.com/casbin/casbin/v2)
+[![Release](https://img.shields.io/github/release/casbin/casbin.svg)](https://github.com/casbin/casbin/releases/latest)
+[![Discord](https://img.shields.io/discord/1022748306096537660?logo=discord&label=discord&color=5865F2)](https://discord.gg/S5UjpzGZjN)
+[![Sourcegraph](https://sourcegraph.com/github.com/casbin/casbin/-/badge.svg)](https://sourcegraph.com/github.com/casbin/casbin?badge)
+
+**News**: Still worried about how to write a correct Casbin policy? The ``Casbin online editor`` is here to help! Try it at: https://casbin.org/editor/
+
+![casbin Logo](casbin-logo.png)
+
+Casbin is a powerful and efficient open-source access control library for Golang projects. It provides support for enforcing authorization based on various [access control models](https://en.wikipedia.org/wiki/Computer_security_model).
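+
+For orientation while reviewing this vendored dependency, here is a minimal usage sketch of the v2 API (an editorial example, not part of the upstream file; the file paths are placeholders, not files added by this diff):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/casbin/casbin/v2"
+)
+
+func main() {
+	// Placeholder paths; any ACL-style model/policy pair works.
+	e, err := casbin.NewEnforcer("path/to/model.conf", "path/to/policy.csv")
+	if err != nil {
+		panic(err)
+	}
+	ok, err := e.Enforce("alice", "data1", "read") // (sub, obj, act)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(ok) // true if some policy rule allows the request
+}
+```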

+**Sponsored by Stytch**: Build auth with fraud prevention, faster. Try Stytch for API-first authentication, user & org management, multi-tenant SSO, MFA, device fingerprinting, and more.
+
+
+## All the languages supported by Casbin:
+
+| [![golang](https://casbin.org/img/langs/golang.png)](https://github.com/casbin/casbin) | [![java](https://casbin.org/img/langs/java.png)](https://github.com/casbin/jcasbin) | [![nodejs](https://casbin.org/img/langs/nodejs.png)](https://github.com/casbin/node-casbin) | [![php](https://casbin.org/img/langs/php.png)](https://github.com/php-casbin/php-casbin) |
+|----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------|
+| [Casbin](https://github.com/casbin/casbin) | [jCasbin](https://github.com/casbin/jcasbin) | [node-Casbin](https://github.com/casbin/node-casbin) | [PHP-Casbin](https://github.com/php-casbin/php-casbin) |
+| production-ready | production-ready | production-ready | production-ready |
+
+| [![python](https://casbin.org/img/langs/python.png)](https://github.com/casbin/pycasbin) | [![dotnet](https://casbin.org/img/langs/dotnet.png)](https://github.com/casbin-net/Casbin.NET) | [![c++](https://casbin.org/img/langs/cpp.png)](https://github.com/casbin/casbin-cpp) | [![rust](https://casbin.org/img/langs/rust.png)](https://github.com/casbin/casbin-rs) |
+|------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|
+| [PyCasbin](https://github.com/casbin/pycasbin) | [Casbin.NET](https://github.com/casbin-net/Casbin.NET) | [Casbin-CPP](https://github.com/casbin/casbin-cpp) | [Casbin-RS](https://github.com/casbin/casbin-rs) |
+| production-ready | production-ready | production-ready | production-ready |
+
+## Table of contents
+
+- [Supported models](#supported-models)
+- [How it works?](#how-it-works)
+- [Features](#features)
+- [Installation](#installation)
+- [Documentation](#documentation)
+- [Online editor](#online-editor)
+- [Tutorials](#tutorials)
+- [Get started](#get-started)
+- [Policy management](#policy-management)
+- [Policy persistence](#policy-persistence)
+- [Policy consistency between multiple nodes](#policy-consistency-between-multiple-nodes)
+- [Role manager](#role-manager)
+- [Benchmarks](#benchmarks)
+- [Examples](#examples)
+- [Middlewares](#middlewares)
+- [Our adopters](#our-adopters)
+
+## Supported models
+
+1. [**ACL (Access Control List)**](https://en.wikipedia.org/wiki/Access_control_list)
+2. **ACL with [superuser](https://en.wikipedia.org/wiki/Superuser)**
+3. **ACL without users**: especially useful for systems that don't have authentication or user log-ins.
+4. **ACL without resources**: some scenarios may target a type of resource instead of an individual resource, using permissions like ``write-article`` or ``read-log``. Such rules don't control access to a specific article or log.
+5. **[RBAC (Role-Based Access Control)](https://en.wikipedia.org/wiki/Role-based_access_control)**
+6. **RBAC with resource roles**: both users and resources can have roles (or groups) at the same time.
+7. **RBAC with domains/tenants**: users can have different role sets for different domains/tenants.
+8. 
**[ABAC (Attribute-Based Access Control)](https://en.wikipedia.org/wiki/Attribute-Based_Access_Control)**: syntactic sugar like ``resource.Owner`` can be used to read an attribute of a resource.
+9. **[RESTful](https://en.wikipedia.org/wiki/Representational_state_transfer)**: supports paths like ``/res/*`` and ``/res/:id``, and HTTP methods like ``GET``, ``POST``, ``PUT``, ``DELETE``.
+10. **Deny-override**: both allow and deny authorizations are supported; deny overrides allow.
+11. **Priority**: policy rules can be prioritized, like firewall rules.
+
+## How it works?
+
+In Casbin, an access control model is abstracted into a CONF file based on the **PERM metamodel (Policy, Effect, Request, Matchers)**. Switching or upgrading the authorization mechanism for a project is then as simple as modifying a configuration. You can customize your own access control model by combining the available models; for example, you can combine RBAC roles and ABAC attributes inside one model and share one set of policy rules.
+
+The most basic and simplest model in Casbin is ACL. The ACL model CONF is:
+
+```ini
+# Request definition
+[request_definition]
+r = sub, obj, act
+
+# Policy definition
+[policy_definition]
+p = sub, obj, act
+
+# Policy effect
+[policy_effect]
+e = some(where (p.eft == allow))
+
+# Matchers
+[matchers]
+m = r.sub == p.sub && r.obj == p.obj && r.act == p.act
+```
+
+An example policy for the ACL model is:
+
+```
+p, alice, data1, read
+p, bob, data2, write
+```
+
+It means:
+
+- alice can read data1
+- bob can write data2
+
+We also support multi-line mode by appending '\\' at the end of a line:
+
+```ini
+# Matchers
+[matchers]
+m = r.sub == p.sub && r.obj == p.obj \
+  && r.act == p.act
+```
+
+Furthermore, if you are using ABAC, you can use the `in` operator as follows in the Casbin **golang** edition (jCasbin and Node-Casbin do not support it yet):
+
+```ini
+# Matchers
+[matchers]
+m = r.obj == p.obj && r.act == p.act || r.obj in ('data2', 'data3')
+```
+
+Make sure the array contains **more than one** element; otherwise the evaluation will panic.
+
+For more operators, take a look at [govaluate](https://github.com/casbin/govaluate).
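+
+For illustration, the same `in` matcher can also be supplied per call through ``EnforceWithMatcher`` (a hypothetical sketch, not from the upstream README; it assumes an enforcer ``e`` built from the ACL model and policy above):
+
+```go
+// Override the model's matcher for a single request (sketch, not upstream code).
+ok, _ := e.EnforceWithMatcher(
+	"r.obj == p.obj && r.act == p.act || r.obj in ('data2', 'data3')",
+	"alice", "data2", "read")
+// ok is true here: the `in` clause matches r.obj, so the request is allowed.
+```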
+
+## Features
+
+What Casbin does:
+
+1. enforce the policy in the classic ``{subject, object, action}`` form or a customized form as you define; both allow and deny authorizations are supported.
+2. handle the storage of the access control model and its policy.
+3. manage the role-user mappings and role-role mappings (aka role hierarchy in RBAC).
+4. support built-in superusers like ``root`` or ``administrator``. A superuser can do anything without explicit permissions.
+5. provide multiple built-in operators to support rule matching. For example, ``keyMatch`` can map a resource key ``/foo/bar`` to the pattern ``/foo*``.
+
+What Casbin does NOT do:
+
+1. authentication (i.e., verifying ``username`` and ``password`` when a user logs in)
+2. manage the list of users or roles. It is usually more convenient for the project itself to manage these entities. Users usually have their own passwords, and Casbin is not designed as a password container. However, Casbin does store the user-role mapping for the RBAC scenario.
+
+## Installation
+
+```
+go get github.com/casbin/casbin/v2
+```
+
+## Documentation
+
+https://casbin.org/docs/overview
+
+## Online editor
+
+You can also use the online editor (https://casbin.org/editor/) to write your Casbin model and policy in your web browser. It provides functionality such as ``syntax highlighting`` and ``code completion``, just like an IDE for a programming language.
+
+## Tutorials
+
+https://casbin.org/docs/tutorials
+
+## Get started
+
+1. Create a new Casbin enforcer with a model file and a policy file:
+
+   ```go
+   e, _ := casbin.NewEnforcer("path/to/model.conf", "path/to/policy.csv")
+   ```
+
+Note: you can also initialize an enforcer with a policy stored in a DB instead of a file; see the [Policy persistence](#policy-persistence) section for details.
+
+2. Add an enforcement hook into your code right before the access happens:
+
+   ```go
+   sub := "alice" // the user that wants to access a resource.
+   obj := "data1" // the resource that is going to be accessed.
+   act := "read"  // the operation that the user performs on the resource.
+
+   if res, _ := e.Enforce(sub, obj, act); res {
+       // permit alice to read data1
+   } else {
+       // deny the request, show an error
+   }
+   ```
+
+3. Besides the static policy file, Casbin also provides an API for permission management at run time. For example, you can get all the roles assigned to a user as below:
+
+   ```go
+   roles, _ := e.GetImplicitRolesForUser(sub)
+   ```
+
+See [Policy management APIs](#policy-management) for more usage.
+
+## Policy management
+
+Casbin provides two sets of APIs to manage permissions:
+
+- [Management API](https://casbin.org/docs/management-api): the primitive API that provides full support for Casbin policy management.
+- [RBAC API](https://casbin.org/docs/rbac-api): a friendlier API for RBAC. This API is a subset of the Management API; RBAC users can use it to simplify their code.
+
+We also provide a [web-based UI](https://casbin.org/docs/admin-portal) for model management and policy management:
+
+![model editor](https://hsluoyz.github.io/casbin/ui_model_editor.png)
+
+![policy editor](https://hsluoyz.github.io/casbin/ui_policy_editor.png)
+
+## Policy persistence
+
+https://casbin.org/docs/adapters
+
+## Policy consistency between multiple nodes
+
+https://casbin.org/docs/watchers
+
+## Role manager
+
+https://casbin.org/docs/role-managers
+
+## Benchmarks
+
+https://casbin.org/docs/benchmark
+
+## Examples
+
+| Model | Model file | Policy file |
+|---------------------------|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
+| ACL | [basic_model.conf](https://github.com/casbin/casbin/blob/master/examples/basic_model.conf) | [basic_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_policy.csv) |
+| ACL with superuser | [basic_with_root_model.conf](https://github.com/casbin/casbin/blob/master/examples/basic_with_root_model.conf) | [basic_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_policy.csv) |
+| ACL without users | [basic_without_users_model.conf](https://github.com/casbin/casbin/blob/master/examples/basic_without_users_model.conf) | [basic_without_users_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_without_users_policy.csv) |
+| ACL without resources | [basic_without_resources_model.conf](https://github.com/casbin/casbin/blob/master/examples/basic_without_resources_model.conf) | [basic_without_resources_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_without_resources_policy.csv) |
+| RBAC | 
[rbac_model.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_model.conf) | [rbac_policy.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_policy.csv) |
+| RBAC with resource roles | [rbac_with_resource_roles_model.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_resource_roles_model.conf) | [rbac_with_resource_roles_policy.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_resource_roles_policy.csv) |
+| RBAC with domains/tenants | [rbac_with_domains_model.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_domains_model.conf) | [rbac_with_domains_policy.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_domains_policy.csv) |
+| ABAC | [abac_model.conf](https://github.com/casbin/casbin/blob/master/examples/abac_model.conf) | N/A |
+| RESTful | [keymatch_model.conf](https://github.com/casbin/casbin/blob/master/examples/keymatch_model.conf) | [keymatch_policy.csv](https://github.com/casbin/casbin/blob/master/examples/keymatch_policy.csv) |
+| Deny-override | [rbac_with_deny_model.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_deny_model.conf) | [rbac_with_deny_policy.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_deny_policy.csv) |
+| Priority | [priority_model.conf](https://github.com/casbin/casbin/blob/master/examples/priority_model.conf) | [priority_policy.csv](https://github.com/casbin/casbin/blob/master/examples/priority_policy.csv) |
+
+## Middlewares
+
+Authz middlewares for web frameworks: https://casbin.org/docs/middlewares
+
+## Our adopters
+
+https://casbin.org/docs/adopters
+
+## How to Contribute
+
+Please read the [contributing guide](CONTRIBUTING.md).
+
+## Contributors
+
+This project exists thanks to all the people who contribute.
+
+## Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/casbin#backer)]
+
+## Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/casbin#sponsor)]
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=casbin/casbin&type=Date)](https://star-history.com/#casbin/casbin&Date)
+
+## License
+
+This project is licensed under the [Apache 2.0 license](LICENSE).
+
+## Contact
+
+If you have any issues or feature requests, please contact us. PRs are welcome.
+- https://github.com/casbin/casbin/issues
+- hsluoyz@gmail.com
+- Tencent QQ group: [546057381](//shang.qq.com/wpa/qunwpa?idkey=8ac8b91fc97ace3d383d0035f7aa06f7d670fd8e8d4837347354a31c18fac885)
diff --git a/vendor/github.com/casbin/casbin/v2/casbin-logo.png b/vendor/github.com/casbin/casbin/v2/casbin-logo.png
new file mode 100644
index 0000000000..7e5d1ecf95
Binary files /dev/null and b/vendor/github.com/casbin/casbin/v2/casbin-logo.png differ
diff --git a/vendor/github.com/casbin/casbin/v2/config/config.go b/vendor/github.com/casbin/casbin/v2/config/config.go
new file mode 100644
index 0000000000..57d40d8494
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/config/config.go
@@ -0,0 +1,267 @@
+// Copyright 2017 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +var ( + // DEFAULT_SECTION specifies the name of a section if no name provided. + DEFAULT_SECTION = "default" + // DEFAULT_COMMENT defines what character(s) indicate a comment `#`. + DEFAULT_COMMENT = []byte{'#'} + // DEFAULT_COMMENT_SEM defines what alternate character(s) indicate a comment `;`. + DEFAULT_COMMENT_SEM = []byte{';'} + // DEFAULT_MULTI_LINE_SEPARATOR defines what character indicates a multi-line content. + DEFAULT_MULTI_LINE_SEPARATOR = []byte{'\\'} +) + +// ConfigInterface defines the behavior of a Config implementation. +type ConfigInterface interface { + String(key string) string + Strings(key string) []string + Bool(key string) (bool, error) + Int(key string) (int, error) + Int64(key string) (int64, error) + Float64(key string) (float64, error) + Set(key string, value string) error +} + +// Config represents an implementation of the ConfigInterface. +type Config struct { + // Section:key=value + data map[string]map[string]string +} + +// NewConfig create an empty configuration representation from file. +func NewConfig(confName string) (ConfigInterface, error) { + c := &Config{ + data: make(map[string]map[string]string), + } + err := c.parse(confName) + return c, err +} + +// NewConfigFromText create an empty configuration representation from text. +func NewConfigFromText(text string) (ConfigInterface, error) { + c := &Config{ + data: make(map[string]map[string]string), + } + err := c.parseBuffer(bufio.NewReader(strings.NewReader(text))) + return c, err +} + +// AddConfig adds a new section->key:value to the configuration. 
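+// It returns true if the option did not already exist, and false if an
+// existing value was overwritten.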
+func (c *Config) AddConfig(section string, option string, value string) bool { + if section == "" { + section = DEFAULT_SECTION + } + + if _, ok := c.data[section]; !ok { + c.data[section] = make(map[string]string) + } + + _, ok := c.data[section][option] + c.data[section][option] = value + + return !ok +} + +func (c *Config) parse(fname string) (err error) { + f, err := os.Open(fname) + if err != nil { + return err + } + defer f.Close() + + buf := bufio.NewReader(f) + return c.parseBuffer(buf) +} + +func (c *Config) parseBuffer(buf *bufio.Reader) error { + var section string + var lineNum int + var buffer bytes.Buffer + var canWrite bool + for { + if canWrite { + if err := c.write(section, lineNum, &buffer); err != nil { + return err + } else { + canWrite = false + } + } + lineNum++ + line, _, err := buf.ReadLine() + if err == io.EOF { + // force write when buffer is not flushed yet + if buffer.Len() > 0 { + if err = c.write(section, lineNum, &buffer); err != nil { + return err + } + } + break + } else if err != nil { + return err + } + + line = bytes.TrimSpace(line) + switch { + case bytes.Equal(line, []byte{}), bytes.HasPrefix(line, DEFAULT_COMMENT_SEM), + bytes.HasPrefix(line, DEFAULT_COMMENT): + canWrite = true + continue + case bytes.HasPrefix(line, []byte{'['}) && bytes.HasSuffix(line, []byte{']'}): + // force write when buffer is not flushed yet + if buffer.Len() > 0 { + if err := c.write(section, lineNum, &buffer); err != nil { + return err + } + canWrite = false + } + section = string(line[1 : len(line)-1]) + default: + var p []byte + if bytes.HasSuffix(line, DEFAULT_MULTI_LINE_SEPARATOR) { + p = bytes.TrimSpace(line[:len(line)-1]) + p = append(p, " "...) + } else { + p = line + canWrite = true + } + + end := len(p) + for i, value := range p { + if value == DEFAULT_COMMENT[0] || value == DEFAULT_COMMENT_SEM[0] { + end = i + break + } + } + if _, err := buffer.Write(p[:end]); err != nil { + return err + } + } + } + + return nil +} + +func (c *Config) write(section string, lineNum int, b *bytes.Buffer) error { + if b.Len() <= 0 { + return nil + } + + optionVal := bytes.SplitN(b.Bytes(), []byte{'='}, 2) + if len(optionVal) != 2 { + return fmt.Errorf("parse the content error : line %d , %s = ? ", lineNum, optionVal[0]) + } + option := bytes.TrimSpace(optionVal[0]) + value := bytes.TrimSpace(optionVal[1]) + c.AddConfig(section, string(option), string(value)) + + // flush buffer after adding + b.Reset() + + return nil +} + +// Bool lookups up the value using the provided key and converts the value to a bool. +func (c *Config) Bool(key string) (bool, error) { + return strconv.ParseBool(c.get(key)) +} + +// Int lookups up the value using the provided key and converts the value to a int. +func (c *Config) Int(key string) (int, error) { + return strconv.Atoi(c.get(key)) +} + +// Int64 lookups up the value using the provided key and converts the value to a int64. +func (c *Config) Int64(key string) (int64, error) { + return strconv.ParseInt(c.get(key), 10, 64) +} + +// Float64 lookups up the value using the provided key and converts the value to a float64. +func (c *Config) Float64(key string) (float64, error) { + return strconv.ParseFloat(c.get(key), 64) +} + +// String lookups up the value using the provided key and converts the value to a string. +func (c *Config) String(key string) string { + return c.get(key) +} + +// Strings lookups up the value using the provided key and converts the value to an array of string +// by splitting the string by comma. 
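+// For example (an illustrative sketch, not from the upstream source):
+//
+//	c, _ := NewConfigFromText("[db]\nhosts = a,b,c")
+//	c.Strings("db::hosts") // returns []string{"a", "b", "c"}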
+func (c *Config) Strings(key string) []string { + v := c.get(key) + if v == "" { + return nil + } + return strings.Split(v, ",") +} + +// Set sets the value for the specific key in the Config. +func (c *Config) Set(key string, value string) error { + if len(key) == 0 { + return errors.New("key is empty") + } + + var ( + section string + option string + ) + + keys := strings.Split(strings.ToLower(key), "::") + if len(keys) >= 2 { + section = keys[0] + option = keys[1] + } else { + option = keys[0] + } + + c.AddConfig(section, option, value) + return nil +} + +// section.key or key. +func (c *Config) get(key string) string { + var ( + section string + option string + ) + + keys := strings.Split(strings.ToLower(key), "::") + if len(keys) >= 2 { + section = keys[0] + option = keys[1] + } else { + section = DEFAULT_SECTION + option = keys[0] + } + + if value, ok := c.data[section][option]; ok { + return value + } + + return "" +} diff --git a/vendor/github.com/casbin/casbin/v2/constant/constants.go b/vendor/github.com/casbin/casbin/v2/constant/constants.go new file mode 100644 index 0000000000..4140ecf3fa --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/constant/constants.go @@ -0,0 +1,31 @@ +// Copyright 2022 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package constant + +const ( + ActionIndex = "act" + DomainIndex = "dom" + SubjectIndex = "sub" + ObjectIndex = "obj" + PriorityIndex = "priority" +) + +const ( + AllowOverrideEffect = "some(where (p_eft == allow))" + DenyOverrideEffect = "!some(where (p_eft == deny))" + AllowAndDenyEffect = "some(where (p_eft == allow)) && !some(where (p_eft == deny))" + PriorityEffect = "priority(p_eft) || deny" + SubjectPriorityEffect = "subjectPriority(p_eft) || deny" +) diff --git a/vendor/github.com/casbin/casbin/v2/effector/default_effector.go b/vendor/github.com/casbin/casbin/v2/effector/default_effector.go new file mode 100644 index 0000000000..feb083a68e --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/effector/default_effector.go @@ -0,0 +1,109 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package effector + +import ( + "errors" + + "github.com/casbin/casbin/v2/constant" +) + +// DefaultEffector is default effector for Casbin. +type DefaultEffector struct { +} + +// NewDefaultEffector is the constructor for DefaultEffector. 
+func NewDefaultEffector() *DefaultEffector { + e := DefaultEffector{} + return &e +} + +// MergeEffects merges all matching results collected by the enforcer into a single decision. +func (e *DefaultEffector) MergeEffects(expr string, effects []Effect, matches []float64, policyIndex int, policyLength int) (Effect, int, error) { + result := Indeterminate + explainIndex := -1 + + switch expr { + case constant.AllowOverrideEffect: + if matches[policyIndex] == 0 { + break + } + // only check the current policyIndex + if effects[policyIndex] == Allow { + result = Allow + explainIndex = policyIndex + break + } + case constant.DenyOverrideEffect: + // only check the current policyIndex + if matches[policyIndex] != 0 && effects[policyIndex] == Deny { + result = Deny + explainIndex = policyIndex + break + } + // if no deny rules are matched at last, then allow + if policyIndex == policyLength-1 { + result = Allow + } + case constant.AllowAndDenyEffect: + // short-circuit if matched deny rule + if matches[policyIndex] != 0 && effects[policyIndex] == Deny { + result = Deny + // set hit rule to the (first) matched deny rule + explainIndex = policyIndex + break + } + + // short-circuit some effects in the middle + if policyIndex < policyLength-1 { + // choose not to short-circuit + return result, explainIndex, nil + } + // merge all effects at last + for i, eft := range effects { + if matches[i] == 0 { + continue + } + + if eft == Allow { + result = Allow + // set hit rule to first matched allow rule + explainIndex = i + break + } + } + case constant.PriorityEffect, constant.SubjectPriorityEffect: + // reverse merge, short-circuit may be earlier + for i := len(effects) - 1; i >= 0; i-- { + if matches[i] == 0 { + continue + } + + if effects[i] != Indeterminate { + if effects[i] == Allow { + result = Allow + } else { + result = Deny + } + explainIndex = i + break + } + } + default: + return Deny, -1, errors.New("unsupported effect") + } + + return result, explainIndex, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/effector/effector.go b/vendor/github.com/casbin/casbin/v2/effector/effector.go new file mode 100644 index 0000000000..49b84c3e14 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/effector/effector.go @@ -0,0 +1,31 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package effector //nolint:cyclop // TODO + +// Effect is the result for a policy rule. +type Effect int + +// Values for policy effect. +const ( + Allow Effect = iota + Indeterminate + Deny +) + +// Effector is the interface for Casbin effectors. +type Effector interface { + // MergeEffects merges all matching results collected by the enforcer into a single decision. 
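+	// expr is the model's [policy_effect] expression (one of the constants in
+	// the constant package, e.g. constant.AllowOverrideEffect); matches[i]
+	// records whether policy rule i satisfied the matcher (1) or not (0); the
+	// returned int is the index of the rule that decided the outcome, or -1.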
+ MergeEffects(expr string, effects []Effect, matches []float64, policyIndex int, policyLength int) (Effect, int, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer.go b/vendor/github.com/casbin/casbin/v2/enforcer.go new file mode 100644 index 0000000000..fd3f43a8d8 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer.go @@ -0,0 +1,1004 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "errors" + "fmt" + "runtime/debug" + "strings" + "sync" + + "github.com/casbin/casbin/v2/effector" + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + fileadapter "github.com/casbin/casbin/v2/persist/file-adapter" + "github.com/casbin/casbin/v2/rbac" + defaultrolemanager "github.com/casbin/casbin/v2/rbac/default-role-manager" + "github.com/casbin/casbin/v2/util" + + "github.com/casbin/govaluate" +) + +// Enforcer is the main interface for authorization enforcement and policy management. +type Enforcer struct { + modelPath string + model model.Model + fm model.FunctionMap + eft effector.Effector + + adapter persist.Adapter + watcher persist.Watcher + dispatcher persist.Dispatcher + rmMap map[string]rbac.RoleManager + condRmMap map[string]rbac.ConditionalRoleManager + matcherMap sync.Map + + enabled bool + autoSave bool + autoBuildRoleLinks bool + autoNotifyWatcher bool + autoNotifyDispatcher bool + acceptJsonRequest bool + + logger log.Logger +} + +// EnforceContext is used as the first element of the parameter "rvals" in method "enforce". +type EnforceContext struct { + RType string + PType string + EType string + MType string +} + +func (e EnforceContext) GetCacheKey() string { + return "EnforceContext{" + e.RType + "-" + e.PType + "-" + e.EType + "-" + e.MType + "}" +} + +// NewEnforcer creates an enforcer via file or DB. 
+// +// File: +// +// e := casbin.NewEnforcer("path/to/basic_model.conf", "path/to/basic_policy.csv") +// +// MySQL DB: +// +// a := mysqladapter.NewDBAdapter("mysql", "mysql_username:mysql_password@tcp(127.0.0.1:3306)/") +// e := casbin.NewEnforcer("path/to/basic_model.conf", a) +func NewEnforcer(params ...interface{}) (*Enforcer, error) { + e := &Enforcer{logger: &log.DefaultLogger{}} + + parsedParamLen := 0 + paramLen := len(params) + if paramLen >= 1 { + enableLog, ok := params[paramLen-1].(bool) + if ok { + e.EnableLog(enableLog) + parsedParamLen++ + } + } + + if paramLen-parsedParamLen >= 1 { + logger, ok := params[paramLen-parsedParamLen-1].(log.Logger) + if ok { + e.logger = logger + parsedParamLen++ + } + } + + switch paramLen - parsedParamLen { + case 2: + switch p0 := params[0].(type) { + case string: + switch p1 := params[1].(type) { + case string: + err := e.InitWithFile(p0, p1) + if err != nil { + return nil, err + } + default: + err := e.InitWithAdapter(p0, p1.(persist.Adapter)) + if err != nil { + return nil, err + } + } + default: + switch params[1].(type) { + case string: + return nil, errors.New("invalid parameters for enforcer") + default: + err := e.InitWithModelAndAdapter(p0.(model.Model), params[1].(persist.Adapter)) + if err != nil { + return nil, err + } + } + } + case 1: + switch p0 := params[0].(type) { + case string: + err := e.InitWithFile(p0, "") + if err != nil { + return nil, err + } + default: + err := e.InitWithModelAndAdapter(p0.(model.Model), nil) + if err != nil { + return nil, err + } + } + case 0: + return e, nil + default: + return nil, errors.New("invalid parameters for enforcer") + } + + return e, nil +} + +// InitWithFile initializes an enforcer with a model file and a policy file. +func (e *Enforcer) InitWithFile(modelPath string, policyPath string) error { + a := fileadapter.NewAdapter(policyPath) + return e.InitWithAdapter(modelPath, a) +} + +// InitWithAdapter initializes an enforcer with a database adapter. +func (e *Enforcer) InitWithAdapter(modelPath string, adapter persist.Adapter) error { + m, err := model.NewModelFromFile(modelPath) + if err != nil { + return err + } + + err = e.InitWithModelAndAdapter(m, adapter) + if err != nil { + return err + } + + e.modelPath = modelPath + return nil +} + +// InitWithModelAndAdapter initializes an enforcer with a model and a database adapter. +func (e *Enforcer) InitWithModelAndAdapter(m model.Model, adapter persist.Adapter) error { + e.adapter = adapter + + e.model = m + m.SetLogger(e.logger) + e.model.PrintModel() + e.fm = model.LoadFunctionMap() + + e.initialize() + + // Do not initialize the full policy when using a filtered adapter + fa, ok := e.adapter.(persist.FilteredAdapter) + if e.adapter != nil && (!ok || ok && !fa.IsFiltered()) { + err := e.LoadPolicy() + if err != nil { + return err + } + } + + return nil +} + +// SetLogger changes the current enforcer's logger. 
+func (e *Enforcer) SetLogger(logger log.Logger) { + e.logger = logger + e.model.SetLogger(e.logger) + for k := range e.rmMap { + e.rmMap[k].SetLogger(e.logger) + } + for k := range e.condRmMap { + e.condRmMap[k].SetLogger(e.logger) + } +} + +func (e *Enforcer) initialize() { + e.rmMap = map[string]rbac.RoleManager{} + e.condRmMap = map[string]rbac.ConditionalRoleManager{} + e.eft = effector.NewDefaultEffector() + e.watcher = nil + e.matcherMap = sync.Map{} + + e.enabled = true + e.autoSave = true + e.autoBuildRoleLinks = true + e.autoNotifyWatcher = true + e.autoNotifyDispatcher = true + e.initRmMap() +} + +// LoadModel reloads the model from the model CONF file. +// Because the policy is attached to a model, so the policy is invalidated and needs to be reloaded by calling LoadPolicy(). +func (e *Enforcer) LoadModel() error { + var err error + e.model, err = model.NewModelFromFile(e.modelPath) + if err != nil { + return err + } + e.model.SetLogger(e.logger) + + e.model.PrintModel() + e.fm = model.LoadFunctionMap() + + e.initialize() + + return nil +} + +// GetModel gets the current model. +func (e *Enforcer) GetModel() model.Model { + return e.model +} + +// SetModel sets the current model. +func (e *Enforcer) SetModel(m model.Model) { + e.model = m + e.fm = model.LoadFunctionMap() + + e.model.SetLogger(e.logger) + e.initialize() +} + +// GetAdapter gets the current adapter. +func (e *Enforcer) GetAdapter() persist.Adapter { + return e.adapter +} + +// SetAdapter sets the current adapter. +func (e *Enforcer) SetAdapter(adapter persist.Adapter) { + e.adapter = adapter +} + +// SetWatcher sets the current watcher. +func (e *Enforcer) SetWatcher(watcher persist.Watcher) error { + e.watcher = watcher + if _, ok := e.watcher.(persist.WatcherEx); ok { + // The callback of WatcherEx has no generic implementation. + return nil + } else { + // In case the Watcher wants to use a customized callback function, call `SetUpdateCallback` after `SetWatcher`. + return watcher.SetUpdateCallback(func(string) { _ = e.LoadPolicy() }) + } +} + +// GetRoleManager gets the current role manager. +func (e *Enforcer) GetRoleManager() rbac.RoleManager { + if e.rmMap != nil && e.rmMap["g"] != nil { + return e.rmMap["g"] + } else { + return nil + } +} + +// GetNamedRoleManager gets the role manager for the named policy. +func (e *Enforcer) GetNamedRoleManager(ptype string) rbac.RoleManager { + if e.rmMap != nil && e.rmMap[ptype] != nil { + return e.rmMap[ptype] + } else { + return nil + } +} + +// SetRoleManager sets the current role manager. +func (e *Enforcer) SetRoleManager(rm rbac.RoleManager) { + e.invalidateMatcherMap() + e.rmMap["g"] = rm +} + +// SetNamedRoleManager sets the role manager for the named policy. +func (e *Enforcer) SetNamedRoleManager(ptype string, rm rbac.RoleManager) { + e.invalidateMatcherMap() + e.rmMap[ptype] = rm +} + +// SetEffector sets the current effector. +func (e *Enforcer) SetEffector(eft effector.Effector) { + e.eft = eft +} + +// ClearPolicy clears all policy. +func (e *Enforcer) ClearPolicy() { + e.invalidateMatcherMap() + + if e.dispatcher != nil && e.autoNotifyDispatcher { + _ = e.dispatcher.ClearPolicy() + return + } + e.model.ClearPolicy() +} + +// LoadPolicy reloads the policy from file/database. 
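+// When autoBuildRoleLinks is enabled (the default), the role inheritance
+// links are rebuilt against the freshly loaded policy as part of the reload.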
+func (e *Enforcer) LoadPolicy() error { + newModel, err := e.loadPolicyFromAdapter(e.model) + if err != nil { + return err + } + err = e.applyModifiedModel(newModel) + if err != nil { + return err + } + return nil +} + +func (e *Enforcer) loadPolicyFromAdapter(baseModel model.Model) (model.Model, error) { + newModel := baseModel.Copy() + newModel.ClearPolicy() + + if err := e.adapter.LoadPolicy(newModel); err != nil && err.Error() != "invalid file path, file path cannot be empty" { + return nil, err + } + + if err := newModel.SortPoliciesBySubjectHierarchy(); err != nil { + return nil, err + } + + if err := newModel.SortPoliciesByPriority(); err != nil { + return nil, err + } + + return newModel, nil +} + +func (e *Enforcer) applyModifiedModel(newModel model.Model) error { + var err error + needToRebuild := false + defer func() { + if err != nil { + if e.autoBuildRoleLinks && needToRebuild { + _ = e.BuildRoleLinks() + } + } + }() + + if e.autoBuildRoleLinks { + needToRebuild = true + + if err := e.rebuildRoleLinks(newModel); err != nil { + return err + } + + if err := e.rebuildConditionalRoleLinks(newModel); err != nil { + return err + } + } + + e.model = newModel + e.invalidateMatcherMap() + return nil +} + +func (e *Enforcer) rebuildRoleLinks(newModel model.Model) error { + if len(e.rmMap) != 0 { + for _, rm := range e.rmMap { + err := rm.Clear() + if err != nil { + return err + } + } + + err := newModel.BuildRoleLinks(e.rmMap) + if err != nil { + return err + } + } + + return nil +} + +func (e *Enforcer) rebuildConditionalRoleLinks(newModel model.Model) error { + if len(e.condRmMap) != 0 { + for _, crm := range e.condRmMap { + err := crm.Clear() + if err != nil { + return err + } + } + + err := newModel.BuildConditionalRoleLinks(e.condRmMap) + if err != nil { + return err + } + } + return nil +} + +func (e *Enforcer) loadFilteredPolicy(filter interface{}) error { + e.invalidateMatcherMap() + + var filteredAdapter persist.FilteredAdapter + + // Attempt to cast the Adapter as a FilteredAdapter + switch adapter := e.adapter.(type) { + case persist.FilteredAdapter: + filteredAdapter = adapter + default: + return errors.New("filtered policies are not supported by this adapter") + } + if err := filteredAdapter.LoadFilteredPolicy(e.model, filter); err != nil && err.Error() != "invalid file path, file path cannot be empty" { + return err + } + + if err := e.model.SortPoliciesBySubjectHierarchy(); err != nil { + return err + } + + if err := e.model.SortPoliciesByPriority(); err != nil { + return err + } + + e.initRmMap() + e.model.PrintPolicy() + if e.autoBuildRoleLinks { + err := e.BuildRoleLinks() + if err != nil { + return err + } + } + return nil +} + +// LoadFilteredPolicy reloads a filtered policy from file/database. +func (e *Enforcer) LoadFilteredPolicy(filter interface{}) error { + e.model.ClearPolicy() + + return e.loadFilteredPolicy(filter) +} + +// LoadIncrementalFilteredPolicy append a filtered policy from file/database. +func (e *Enforcer) LoadIncrementalFilteredPolicy(filter interface{}) error { + return e.loadFilteredPolicy(filter) +} + +// IsFiltered returns true if the loaded policy has been filtered. +func (e *Enforcer) IsFiltered() bool { + filteredAdapter, ok := e.adapter.(persist.FilteredAdapter) + if !ok { + return false + } + return filteredAdapter.IsFiltered() +} + +// SavePolicy saves the current policy (usually after changed with Casbin API) back to file/database. 
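+// For example (an illustrative sketch; AddPolicy is part of the enforcer's
+// management API, which is not shown in this diff):
+//
+//	e.EnableAutoSave(false)                      // stage changes in memory only
+//	_, _ = e.AddPolicy("alice", "data1", "read") // not persisted yet
+//	_ = e.SavePolicy()                           // write the full policy back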
+func (e *Enforcer) SavePolicy() error { + if e.IsFiltered() { + return errors.New("cannot save a filtered policy") + } + if err := e.adapter.SavePolicy(e.model); err != nil { + return err + } + if e.watcher != nil { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForSavePolicy(e.model) + } else { + err = e.watcher.Update() + } + return err + } + return nil +} + +func (e *Enforcer) initRmMap() { + for ptype, assertion := range e.model["g"] { + if rm, ok := e.rmMap[ptype]; ok { + _ = rm.Clear() + continue + } + if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) == 0 { + assertion.RM = defaultrolemanager.NewRoleManagerImpl(10) + e.rmMap[ptype] = assertion.RM + } + if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) != 0 { + assertion.CondRM = defaultrolemanager.NewConditionalRoleManager(10) + e.condRmMap[ptype] = assertion.CondRM + } + if len(assertion.Tokens) > 2 { + if len(assertion.ParamsTokens) == 0 { + assertion.RM = defaultrolemanager.NewRoleManager(10) + e.rmMap[ptype] = assertion.RM + } else { + assertion.CondRM = defaultrolemanager.NewConditionalDomainManager(10) + e.condRmMap[ptype] = assertion.CondRM + } + matchFun := "keyMatch(r_dom, p_dom)" + if strings.Contains(e.model["m"]["m"].Value, matchFun) { + e.AddNamedDomainMatchingFunc(ptype, "g", util.KeyMatch) + } + } + } +} + +// EnableEnforce changes the enforcing state of Casbin, when Casbin is disabled, all access will be allowed by the Enforce() function. +func (e *Enforcer) EnableEnforce(enable bool) { + e.enabled = enable +} + +// EnableLog changes whether Casbin will log messages to the Logger. +func (e *Enforcer) EnableLog(enable bool) { + e.logger.EnableLog(enable) +} + +// IsLogEnabled returns the current logger's enabled status. +func (e *Enforcer) IsLogEnabled() bool { + return e.logger.IsEnabled() +} + +// EnableAutoNotifyWatcher controls whether to save a policy rule automatically notify the Watcher when it is added or removed. +func (e *Enforcer) EnableAutoNotifyWatcher(enable bool) { + e.autoNotifyWatcher = enable +} + +// EnableAutoNotifyDispatcher controls whether to save a policy rule automatically notify the Dispatcher when it is added or removed. +func (e *Enforcer) EnableAutoNotifyDispatcher(enable bool) { + e.autoNotifyDispatcher = enable +} + +// EnableAutoSave controls whether to save a policy rule automatically to the adapter when it is added or removed. +func (e *Enforcer) EnableAutoSave(autoSave bool) { + e.autoSave = autoSave +} + +// EnableAutoBuildRoleLinks controls whether to rebuild the role inheritance relations when a role is added or deleted. +func (e *Enforcer) EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) { + e.autoBuildRoleLinks = autoBuildRoleLinks +} + +// EnableAcceptJsonRequest controls whether to accept json as a request parameter. +func (e *Enforcer) EnableAcceptJsonRequest(acceptJsonRequest bool) { + e.acceptJsonRequest = acceptJsonRequest +} + +// BuildRoleLinks manually rebuild the role inheritance relations. +func (e *Enforcer) BuildRoleLinks() error { + if e.rmMap == nil { + return errors.New("rmMap is nil") + } + for _, rm := range e.rmMap { + err := rm.Clear() + if err != nil { + return err + } + } + + return e.model.BuildRoleLinks(e.rmMap) +} + +// BuildIncrementalRoleLinks provides incremental build the role inheritance relations. 
+func (e *Enforcer) BuildIncrementalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error { + e.invalidateMatcherMap() + return e.model.BuildIncrementalRoleLinks(e.rmMap, op, "g", ptype, rules) +} + +// BuildIncrementalConditionalRoleLinks provides incremental build the role inheritance relations with conditions. +func (e *Enforcer) BuildIncrementalConditionalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error { + e.invalidateMatcherMap() + return e.model.BuildIncrementalConditionalRoleLinks(e.condRmMap, op, "g", ptype, rules) +} + +// NewEnforceContext Create a default structure based on the suffix. +func NewEnforceContext(suffix string) EnforceContext { + return EnforceContext{ + RType: "r" + suffix, + PType: "p" + suffix, + EType: "e" + suffix, + MType: "m" + suffix, + } +} + +func (e *Enforcer) invalidateMatcherMap() { + e.matcherMap = sync.Map{} +} + +// enforce use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". +func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interface{}) (ok bool, err error) { //nolint:funlen,cyclop,gocyclo // TODO: reduce function complexity + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v\n%s", r, debug.Stack()) + } + }() + + if !e.enabled { + return true, nil + } + + functions := e.fm.GetFunctions() + if _, ok := e.model["g"]; ok { + for key, ast := range e.model["g"] { + // g must be a normal role definition (ast.RM != nil) + // or a conditional role definition (ast.CondRM != nil) + // ast.RM and ast.CondRM shouldn't be nil at the same time + if ast.RM != nil { + functions[key] = util.GenerateGFunction(ast.RM) + } + if ast.CondRM != nil { + functions[key] = util.GenerateConditionalGFunction(ast.CondRM) + } + } + } + + var ( + rType = "r" + pType = "p" + eType = "e" + mType = "m" + ) + if len(rvals) != 0 { + switch rvals[0].(type) { + case EnforceContext: + enforceContext := rvals[0].(EnforceContext) + rType = enforceContext.RType + pType = enforceContext.PType + eType = enforceContext.EType + mType = enforceContext.MType + rvals = rvals[1:] + default: + break + } + } + + var expString string + if matcher == "" { + expString = e.model["m"][mType].Value + } else { + expString = util.RemoveComments(util.EscapeAssertion(matcher)) + } + + rTokens := make(map[string]int, len(e.model["r"][rType].Tokens)) + for i, token := range e.model["r"][rType].Tokens { + rTokens[token] = i + } + pTokens := make(map[string]int, len(e.model["p"][pType].Tokens)) + for i, token := range e.model["p"][pType].Tokens { + pTokens[token] = i + } + + if e.acceptJsonRequest { + // try to parse all request values from json to map[string]interface{} + // skip if there is an error + for i, rval := range rvals { + switch rval := rval.(type) { + case string: + var mapValue map[string]interface{} + mapValue, err = util.JsonToMap(rval) + if err == nil { + rvals[i] = mapValue + } + } + } + } + + parameters := enforceParameters{ + rTokens: rTokens, + rVals: rvals, + + pTokens: pTokens, + } + + hasEval := util.HasEval(expString) + if hasEval { + functions["eval"] = generateEvalFunction(functions, ¶meters) + } + var expression *govaluate.EvaluableExpression + expression, err = e.getAndStoreMatcherExpression(hasEval, expString, functions) + if err != nil { + return false, err + } + + if len(e.model["r"][rType].Tokens) != len(rvals) { + return false, fmt.Errorf( + 
"invalid request size: expected %d, got %d, rvals: %v", + len(e.model["r"][rType].Tokens), + len(rvals), + rvals) + } + + var policyEffects []effector.Effect + var matcherResults []float64 + + var effect effector.Effect + var explainIndex int + + if policyLen := len(e.model["p"][pType].Policy); policyLen != 0 && strings.Contains(expString, pType+"_") { //nolint:nestif // TODO: reduce function complexity + policyEffects = make([]effector.Effect, policyLen) + matcherResults = make([]float64, policyLen) + + for policyIndex, pvals := range e.model["p"][pType].Policy { + // log.LogPrint("Policy Rule: ", pvals) + if len(e.model["p"][pType].Tokens) != len(pvals) { + return false, fmt.Errorf( + "invalid policy size: expected %d, got %d, pvals: %v", + len(e.model["p"][pType].Tokens), + len(pvals), + pvals) + } + + parameters.pVals = pvals + + result, err := expression.Eval(parameters) + // log.LogPrint("Result: ", result) + + if err != nil { + return false, err + } + + // set to no-match at first + matcherResults[policyIndex] = 0 + switch result := result.(type) { + case bool: + if result { + matcherResults[policyIndex] = 1 + } + case float64: + if result != 0 { + matcherResults[policyIndex] = 1 + } + default: + return false, errors.New("matcher result should be bool, int or float") + } + + if j, ok := parameters.pTokens[pType+"_eft"]; ok { + eft := parameters.pVals[j] + if eft == "allow" { + policyEffects[policyIndex] = effector.Allow + } else if eft == "deny" { + policyEffects[policyIndex] = effector.Deny + } else { + policyEffects[policyIndex] = effector.Indeterminate + } + } else { + policyEffects[policyIndex] = effector.Allow + } + + // if e.model["e"]["e"].Value == "priority(p_eft) || deny" { + // break + // } + + effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, policyIndex, policyLen) + if err != nil { + return false, err + } + if effect != effector.Indeterminate { + break + } + } + } else { + if hasEval && len(e.model["p"][pType].Policy) == 0 { + return false, errors.New("please make sure rule exists in policy when using eval() in matcher") + } + + policyEffects = make([]effector.Effect, 1) + matcherResults = make([]float64, 1) + matcherResults[0] = 1 + + parameters.pVals = make([]string, len(parameters.pTokens)) + + result, err := expression.Eval(parameters) + + if err != nil { + return false, err + } + + if result.(bool) { + policyEffects[0] = effector.Allow + } else { + policyEffects[0] = effector.Indeterminate + } + + effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, 0, 1) + if err != nil { + return false, err + } + } + + var logExplains [][]string + + if explains != nil { + if len(*explains) > 0 { + logExplains = append(logExplains, *explains) + } + + if explainIndex != -1 && len(e.model["p"][pType].Policy) > explainIndex { + *explains = e.model["p"][pType].Policy[explainIndex] + logExplains = append(logExplains, *explains) + } + } + + // effect -> result + result := false + if effect == effector.Allow { + result = true + } + e.logger.LogEnforce(expString, rvals, result, logExplains) + + return result, nil +} + +func (e *Enforcer) getAndStoreMatcherExpression(hasEval bool, expString string, functions map[string]govaluate.ExpressionFunction) (*govaluate.EvaluableExpression, error) { + var expression *govaluate.EvaluableExpression + var err error + var cachedExpression, isPresent = e.matcherMap.Load(expString) + + if !hasEval && isPresent { + expression = 
cachedExpression.(*govaluate.EvaluableExpression) + } else { + expression, err = govaluate.NewEvaluableExpressionWithFunctions(expString, functions) + if err != nil { + return nil, err + } + e.matcherMap.Store(expString, expression) + } + return expression, nil +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +func (e *Enforcer) Enforce(rvals ...interface{}) (bool, error) { + return e.enforce("", nil, rvals...) +} + +// EnforceWithMatcher use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". +func (e *Enforcer) EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) { + return e.enforce(matcher, nil, rvals...) +} + +// EnforceEx explain enforcement by informing matched rules. +func (e *Enforcer) EnforceEx(rvals ...interface{}) (bool, []string, error) { + explain := []string{} + result, err := e.enforce("", &explain, rvals...) + return result, explain, err +} + +// EnforceExWithMatcher use a custom matcher and explain enforcement by informing matched rules. +func (e *Enforcer) EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) { + explain := []string{} + result, err := e.enforce(matcher, &explain, rvals...) + return result, explain, err +} + +// BatchEnforce enforce in batches. +func (e *Enforcer) BatchEnforce(requests [][]interface{}) ([]bool, error) { + var results []bool + for _, request := range requests { + result, err := e.enforce("", nil, request...) + if err != nil { + return results, err + } + results = append(results, result) + } + return results, nil +} + +// BatchEnforceWithMatcher enforce with matcher in batches. +func (e *Enforcer) BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) { + var results []bool + for _, request := range requests { + result, err := e.enforce(matcher, nil, request...) + if err != nil { + return results, err + } + results = append(results, result) + } + return results, nil +} + +// AddNamedMatchingFunc add MatchingFunc by ptype RoleManager. +func (e *Enforcer) AddNamedMatchingFunc(ptype, name string, fn rbac.MatchingFunc) bool { + if rm, ok := e.rmMap[ptype]; ok { + rm.AddMatchingFunc(name, fn) + return true + } + return false +} + +// AddNamedDomainMatchingFunc add MatchingFunc by ptype to RoleManager. +func (e *Enforcer) AddNamedDomainMatchingFunc(ptype, name string, fn rbac.MatchingFunc) bool { + if rm, ok := e.rmMap[ptype]; ok { + rm.AddDomainMatchingFunc(name, fn) + return true + } + return false +} + +// AddNamedLinkConditionFunc Add condition function fn for Link userName->roleName, +// when fn returns true, Link is valid, otherwise invalid. +func (e *Enforcer) AddNamedLinkConditionFunc(ptype, user, role string, fn rbac.LinkConditionFunc) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.AddLinkConditionFunc(user, role, fn) + return true + } + return false +} + +// AddNamedDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, +// when fn returns true, Link is valid, otherwise invalid. 
+func (e *Enforcer) AddNamedDomainLinkConditionFunc(ptype, user, role string, domain string, fn rbac.LinkConditionFunc) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.AddDomainLinkConditionFunc(user, role, domain, fn) + return true + } + return false +} + +// SetNamedLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName. +func (e *Enforcer) SetNamedLinkConditionFuncParams(ptype, user, role string, params ...string) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.SetLinkConditionFuncParams(user, role, params...) + return true + } + return false +} + +// SetNamedDomainLinkConditionFuncParams Sets the parameters of the condition function fn +// for Link userName->{roleName, domain}. +func (e *Enforcer) SetNamedDomainLinkConditionFuncParams(ptype, user, role, domain string, params ...string) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.SetDomainLinkConditionFuncParams(user, role, domain, params...) + return true + } + return false +} + +// assumes bounds have already been checked. +type enforceParameters struct { + rTokens map[string]int + rVals []interface{} + + pTokens map[string]int + pVals []string +} + +// implements govaluate.Parameters. +func (p enforceParameters) Get(name string) (interface{}, error) { + if name == "" { + return nil, nil + } + + switch name[0] { + case 'p': + i, ok := p.pTokens[name] + if !ok { + return nil, errors.New("No parameter '" + name + "' found.") + } + return p.pVals[i], nil + case 'r': + i, ok := p.rTokens[name] + if !ok { + return nil, errors.New("No parameter '" + name + "' found.") + } + return p.rVals[i], nil + default: + return nil, errors.New("No parameter '" + name + "' found.") + } +} + +func generateEvalFunction(functions map[string]govaluate.ExpressionFunction, parameters *enforceParameters) govaluate.ExpressionFunction { + return func(args ...interface{}) (interface{}, error) { + if len(args) != 1 { + return nil, fmt.Errorf("function eval(subrule string) expected %d arguments, but got %d", 1, len(args)) + } + + expression, ok := args[0].(string) + if !ok { + return nil, errors.New("argument of eval(subrule string) must be a string") + } + expression = util.EscapeAssertion(expression) + expr, err := govaluate.NewEvaluableExpressionWithFunctions(expression, functions) + if err != nil { + return nil, fmt.Errorf("error while parsing eval parameter: %s, %s", expression, err.Error()) + } + return expr.Eval(parameters) + } +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_cached.go b/vendor/github.com/casbin/casbin/v2/enforcer_cached.go new file mode 100644 index 0000000000..b89bad78d3 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_cached.go @@ -0,0 +1,185 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/casbin/casbin/v2/persist/cache" +) + +// CachedEnforcer wraps Enforcer and provides decision cache. 
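
`generateEvalFunction` above is what backs `eval()` subrules in matchers: the policy row carries an expression that is parsed per request and evaluated against `enforceParameters`. A sketch of the ABAC-rule pattern from the casbin documentation, assuming a model whose matcher is `m = eval(p.sub_rule) && r.obj == p.obj && r.act == p.act` and a policy line such as `p, r.sub.Age > 18, /data1, read` (file names are placeholders):

```go
package main

import (
	"fmt"

	casbinv2 "github.com/casbin/casbin/v2"
)

// The request subject is a struct; eval(p.sub_rule) dereferences its
// exported fields (r.sub.Age) when the stored subrule is evaluated.
type subject struct {
	Name string
	Age  int
}

func main() {
	// Assumed to contain the matcher and policy line described above.
	e, err := casbinv2.NewEnforcer("abac_rule_model.conf", "abac_rule_policy.csv")
	if err != nil {
		panic(err)
	}

	ok, err := e.Enforce(subject{Name: "alice", Age: 25}, "/data1", "read")
	fmt.Println(ok, err) // true <nil>: 25 > 18 satisfies the stored subrule
}
```
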
+type CachedEnforcer struct { + *Enforcer + expireTime time.Duration + cache cache.Cache + enableCache int32 + locker *sync.RWMutex +} + +type CacheableParam interface { + GetCacheKey() string +} + +// NewCachedEnforcer creates a cached enforcer via file or DB. +func NewCachedEnforcer(params ...interface{}) (*CachedEnforcer, error) { + e := &CachedEnforcer{} + var err error + e.Enforcer, err = NewEnforcer(params...) + if err != nil { + return nil, err + } + + e.enableCache = 1 + e.cache, _ = cache.NewDefaultCache() + e.locker = new(sync.RWMutex) + return e, nil +} + +// EnableCache determines whether to enable cache on Enforce(). When enableCache is enabled, cached result (true | false) will be returned for previous decisions. +func (e *CachedEnforcer) EnableCache(enableCache bool) { + var enabled int32 + if enableCache { + enabled = 1 + } + atomic.StoreInt32(&e.enableCache, enabled) +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +// if rvals is not string , ignore the cache. +func (e *CachedEnforcer) Enforce(rvals ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) == 0 { + return e.Enforcer.Enforce(rvals...) + } + + key, ok := e.getKey(rvals...) + if !ok { + return e.Enforcer.Enforce(rvals...) + } + + if res, err := e.getCachedResult(key); err == nil { + return res, nil + } else if err != cache.ErrNoSuchKey { + return res, err + } + + res, err := e.Enforcer.Enforce(rvals...) + if err != nil { + return false, err + } + + err = e.setCachedResult(key, res, e.expireTime) + return res, err +} + +func (e *CachedEnforcer) LoadPolicy() error { + if atomic.LoadInt32(&e.enableCache) != 0 { + if err := e.cache.Clear(); err != nil { + return err + } + } + return e.Enforcer.LoadPolicy() +} + +func (e *CachedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) != 0 { + key, ok := e.getKey(params...) + if ok { + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + return e.Enforcer.RemovePolicy(params...) +} + +func (e *CachedEnforcer) RemovePolicies(rules [][]string) (bool, error) { + if len(rules) != 0 { + if atomic.LoadInt32(&e.enableCache) != 0 { + irule := make([]interface{}, len(rules[0])) + for _, rule := range rules { + for i, param := range rule { + irule[i] = param + } + key, _ := e.getKey(irule...) + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + } + return e.Enforcer.RemovePolicies(rules) +} + +func (e *CachedEnforcer) getCachedResult(key string) (res bool, err error) { + e.locker.Lock() + defer e.locker.Unlock() + return e.cache.Get(key) +} + +func (e *CachedEnforcer) SetExpireTime(expireTime time.Duration) { + e.expireTime = expireTime +} + +func (e *CachedEnforcer) SetCache(c cache.Cache) { + e.cache = c +} + +func (e *CachedEnforcer) setCachedResult(key string, res bool, extra ...interface{}) error { + e.locker.Lock() + defer e.locker.Unlock() + return e.cache.Set(key, res, extra...) +} + +func (e *CachedEnforcer) getKey(params ...interface{}) (string, bool) { + return GetCacheKey(params...) +} + +// InvalidateCache deletes all the existing cached decisions. 
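
A sketch of the decision cache in use. Note the key format produced by `getKey` via the package-level `GetCacheKey` helper defined just below: string parameters are joined with `$$`, and a non-string parameter bypasses the cache for that call unless it implements `CacheableParam`. Paths are placeholders:

```go
package main

import (
	"fmt"
	"time"

	casbinv2 "github.com/casbin/casbin/v2"
)

func main() {
	// Placeholder paths; the cache behaviour is independent of the model.
	e, err := casbinv2.NewCachedEnforcer("model.conf", "policy.csv")
	if err != nil {
		panic(err)
	}
	e.SetExpireTime(10 * time.Minute) // passed to the cache as the entry TTL

	// The first call runs the matcher; the repeat is answered from the
	// cache under the key "alice$$data1$$read$$".
	ok1, _ := e.Enforce("alice", "data1", "read")
	ok2, _ := e.Enforce("alice", "data1", "read")
	fmt.Println(ok1, ok2)

	// Drop all cached decisions, e.g. after an out-of-band policy edit.
	_ = e.InvalidateCache()
}
```
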
+func (e *CachedEnforcer) InvalidateCache() error { + e.locker.Lock() + defer e.locker.Unlock() + return e.cache.Clear() +} + +func GetCacheKey(params ...interface{}) (string, bool) { + key := strings.Builder{} + for _, param := range params { + switch typedParam := param.(type) { + case string: + key.WriteString(typedParam) + case CacheableParam: + key.WriteString(typedParam.GetCacheKey()) + default: + return "", false + } + key.WriteString("$$") + } + return key.String(), true +} + +// ClearPolicy clears all policy. +func (e *CachedEnforcer) ClearPolicy() { + if atomic.LoadInt32(&e.enableCache) != 0 { + if err := e.cache.Clear(); err != nil { + e.logger.LogError(err, "clear cache failed") + return + } + } + e.Enforcer.ClearPolicy() +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go b/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go new file mode 100644 index 0000000000..0032460fc3 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go @@ -0,0 +1,180 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/casbin/casbin/v2/persist/cache" +) + +// SyncedCachedEnforcer wraps Enforcer and provides decision sync cache. +type SyncedCachedEnforcer struct { + *SyncedEnforcer + expireTime time.Duration + cache cache.Cache + enableCache int32 + locker *sync.RWMutex +} + +// NewSyncedCachedEnforcer creates a sync cached enforcer via file or DB. +func NewSyncedCachedEnforcer(params ...interface{}) (*SyncedCachedEnforcer, error) { + e := &SyncedCachedEnforcer{} + var err error + e.SyncedEnforcer, err = NewSyncedEnforcer(params...) + if err != nil { + return nil, err + } + + e.enableCache = 1 + e.cache, _ = cache.NewSyncCache() + e.locker = new(sync.RWMutex) + return e, nil +} + +// EnableCache determines whether to enable cache on Enforce(). When enableCache is enabled, cached result (true | false) will be returned for previous decisions. +func (e *SyncedCachedEnforcer) EnableCache(enableCache bool) { + var enabled int32 + if enableCache { + enabled = 1 + } + atomic.StoreInt32(&e.enableCache, enabled) +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +// if rvals is not string , ignore the cache. +func (e *SyncedCachedEnforcer) Enforce(rvals ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) == 0 { + return e.SyncedEnforcer.Enforce(rvals...) + } + + key, ok := e.getKey(rvals...) + if !ok { + return e.SyncedEnforcer.Enforce(rvals...) + } + + if res, err := e.getCachedResult(key); err == nil { + return res, nil + } else if err != cache.ErrNoSuchKey { + return res, err + } + + res, err := e.SyncedEnforcer.Enforce(rvals...) 
+ if err != nil { + return false, err + } + + err = e.setCachedResult(key, res, e.expireTime) + return res, err +} + +func (e *SyncedCachedEnforcer) LoadPolicy() error { + if atomic.LoadInt32(&e.enableCache) != 0 { + if err := e.cache.Clear(); err != nil { + return err + } + } + return e.SyncedEnforcer.LoadPolicy() +} + +func (e *SyncedCachedEnforcer) AddPolicy(params ...interface{}) (bool, error) { + if ok, err := e.checkOneAndRemoveCache(params...); !ok { + return ok, err + } + return e.SyncedEnforcer.AddPolicy(params...) +} + +func (e *SyncedCachedEnforcer) AddPolicies(rules [][]string) (bool, error) { + if ok, err := e.checkManyAndRemoveCache(rules); !ok { + return ok, err + } + return e.SyncedEnforcer.AddPolicies(rules) +} + +func (e *SyncedCachedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { + if ok, err := e.checkOneAndRemoveCache(params...); !ok { + return ok, err + } + return e.SyncedEnforcer.RemovePolicy(params...) +} + +func (e *SyncedCachedEnforcer) RemovePolicies(rules [][]string) (bool, error) { + if ok, err := e.checkManyAndRemoveCache(rules); !ok { + return ok, err + } + return e.SyncedEnforcer.RemovePolicies(rules) +} + +func (e *SyncedCachedEnforcer) getCachedResult(key string) (res bool, err error) { + return e.cache.Get(key) +} + +func (e *SyncedCachedEnforcer) SetExpireTime(expireTime time.Duration) { + e.locker.Lock() + defer e.locker.Unlock() + e.expireTime = expireTime +} + +// SetCache need to be sync cache. +func (e *SyncedCachedEnforcer) SetCache(c cache.Cache) { + e.locker.Lock() + defer e.locker.Unlock() + e.cache = c +} + +func (e *SyncedCachedEnforcer) setCachedResult(key string, res bool, extra ...interface{}) error { + return e.cache.Set(key, res, extra...) +} + +func (e *SyncedCachedEnforcer) getKey(params ...interface{}) (string, bool) { + return GetCacheKey(params...) +} + +// InvalidateCache deletes all the existing cached decisions. +func (e *SyncedCachedEnforcer) InvalidateCache() error { + return e.cache.Clear() +} + +func (e *SyncedCachedEnforcer) checkOneAndRemoveCache(params ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) != 0 { + key, ok := e.getKey(params...) + if ok { + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + return true, nil +} + +func (e *SyncedCachedEnforcer) checkManyAndRemoveCache(rules [][]string) (bool, error) { + if len(rules) != 0 { + if atomic.LoadInt32(&e.enableCache) != 0 { + irule := make([]interface{}, len(rules[0])) + for _, rule := range rules { + for i, param := range rule { + irule[i] = param + } + key, _ := e.getKey(irule...) + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + } + return true, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go b/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go new file mode 100644 index 0000000000..09f667237a --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go @@ -0,0 +1,239 @@ +package casbin + +import ( + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +// DistributedEnforcer wraps SyncedEnforcer for dispatcher. +type DistributedEnforcer struct { + *SyncedEnforcer +} + +func NewDistributedEnforcer(params ...interface{}) (*DistributedEnforcer, error) { + e := &DistributedEnforcer{} + var err error + e.SyncedEnforcer, err = NewSyncedEnforcer(params...) 
+ if err != nil { + return nil, err + } + + return e, nil +} + +// SetDispatcher sets the current dispatcher. +func (d *DistributedEnforcer) SetDispatcher(dispatcher persist.Dispatcher) { + d.dispatcher = dispatcher +} + +// AddPoliciesSelf provides a method for dispatcher to add authorization rules to the current policy. +// The function returns the rules affected and error. +func (d *DistributedEnforcer) AddPoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + var noExistsPolicy [][]string + for _, rule := range rules { + var hasPolicy bool + hasPolicy, err = d.model.HasPolicy(sec, ptype, rule) + if err != nil { + return nil, err + } + if !hasPolicy { + noExistsPolicy = append(noExistsPolicy, rule) + } + } + + if err = d.adapter.(persist.BatchAdapter).AddPolicies(sec, ptype, noExistsPolicy); err != nil && err.Error() != notImplemented { + return nil, err + } + } + + affected, err = d.model.AddPoliciesWithAffected(sec, ptype, rules) + if err != nil { + return affected, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, affected) + if err != nil { + return affected, err + } + } + + return affected, nil +} + +// RemovePoliciesSelf provides a method for dispatcher to remove a set of rules from current policy. +// The function returns the rules affected and error. +func (d *DistributedEnforcer) RemovePoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + if err = d.adapter.(persist.BatchAdapter).RemovePolicies(sec, ptype, rules); err != nil { + if err.Error() != notImplemented { + return nil, err + } + } + } + + affected, err = d.model.RemovePoliciesWithAffected(sec, ptype, rules) + if err != nil { + return affected, err + } + + if sec == "g" { + err = d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, affected) + if err != nil { + return affected, err + } + } + + return affected, err +} + +// RemoveFilteredPolicySelf provides a method for dispatcher to remove an authorization rule from the current policy, field filters can be specified. +// The function returns the rules affected and error. +func (d *DistributedEnforcer) RemoveFilteredPolicySelf(shouldPersist func() bool, sec string, ptype string, fieldIndex int, fieldValues ...string) (affected [][]string, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + if err = d.adapter.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...); err != nil { + if err.Error() != notImplemented { + return nil, err + } + } + } + + _, affected, err = d.model.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + if err != nil { + return affected, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, affected) + if err != nil { + return affected, err + } + } + + return affected, nil +} + +// ClearPolicySelf provides a method for dispatcher to clear all rules from the current policy. 
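
The `*Self` methods let a dispatcher (a raft log, for instance) apply a replicated change locally without re-dispatching it; the `shouldPersist` callback decides whether this node also writes through to the adapter. A sketch under two stated assumptions: leadership is known to the caller, and the adapter implements `persist.BatchAdapter` (the unchecked type assertion in `AddPoliciesSelf` above requires it whenever `shouldPersist()` is true):

```go
package main

import (
	"fmt"

	casbinv2 "github.com/casbin/casbin/v2"
)

func main() {
	// Placeholder paths; a DB-backed adapter would be typical here.
	d, err := casbinv2.NewDistributedEnforcer("model.conf", "policy.csv")
	if err != nil {
		panic(err)
	}

	// Assumption: only the cluster leader persists; followers apply the
	// replicated change in memory only.
	isLeader := func() bool { return true }

	affected, err := d.AddPoliciesSelf(isLeader, "p", "p", [][]string{
		{"alice", "data1", "read"},
		{"bob", "data2", "write"},
	})
	fmt.Println(affected, err) // only the rules that were actually added
}
```
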
+func (d *DistributedEnforcer) ClearPolicySelf(shouldPersist func() bool) error { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + err := d.adapter.SavePolicy(nil) + if err != nil { + return err + } + } + + d.model.ClearPolicy() + + return nil +} + +// UpdatePolicySelf provides a method for dispatcher to update an authorization rule from the current policy. +func (d *DistributedEnforcer) UpdatePolicySelf(shouldPersist func() bool, sec string, ptype string, oldRule, newRule []string) (affected bool, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + err = d.adapter.(persist.UpdatableAdapter).UpdatePolicy(sec, ptype, oldRule, newRule) + if err != nil { + return false, err + } + } + + ruleUpdated, err := d.model.UpdatePolicy(sec, ptype, oldRule, newRule) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{oldRule}) // remove the old rule + if err != nil { + return ruleUpdated, err + } + err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{newRule}) // add the new rule + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +// UpdatePoliciesSelf provides a method for dispatcher to update a set of authorization rules from the current policy. +func (d *DistributedEnforcer) UpdatePoliciesSelf(shouldPersist func() bool, sec string, ptype string, oldRules, newRules [][]string) (affected bool, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + err = d.adapter.(persist.UpdatableAdapter).UpdatePolicies(sec, ptype, oldRules, newRules) + if err != nil { + return false, err + } + } + + ruleUpdated, err := d.model.UpdatePolicies(sec, ptype, oldRules, newRules) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rule + if err != nil { + return ruleUpdated, err + } + err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rule + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +// UpdateFilteredPoliciesSelf provides a method for dispatcher to update a set of authorization rules from the current policy. +func (d *DistributedEnforcer) UpdateFilteredPoliciesSelf(shouldPersist func() bool, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + d.m.Lock() + defer d.m.Unlock() + var ( + oldRules [][]string + err error + ) + if shouldPersist != nil && shouldPersist() { + oldRules, err = d.adapter.(persist.UpdatableAdapter).UpdateFilteredPolicies(sec, ptype, newRules, fieldIndex, fieldValues...) 
+ if err != nil { + return false, err + } + } + + ruleChanged, err := d.model.RemovePolicies(sec, ptype, oldRules) + if err != nil { + return ruleChanged, err + } + err = d.model.AddPolicies(sec, ptype, newRules) + if err != nil { + return ruleChanged, err + } + ruleChanged = ruleChanged && len(newRules) != 0 + if !ruleChanged { + return ruleChanged, nil + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rule + if err != nil { + return ruleChanged, err + } + err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rule + if err != nil { + return ruleChanged, err + } + } + + return true, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_interface.go b/vendor/github.com/casbin/casbin/v2/enforcer_interface.go new file mode 100644 index 0000000000..d22dcf10b3 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_interface.go @@ -0,0 +1,177 @@ +// Copyright 2019 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "github.com/casbin/casbin/v2/effector" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + "github.com/casbin/casbin/v2/rbac" + "github.com/casbin/govaluate" +) + +var _ IEnforcer = &Enforcer{} +var _ IEnforcer = &SyncedEnforcer{} +var _ IEnforcer = &CachedEnforcer{} + +// IEnforcer is the API interface of Enforcer. 
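
Because `Enforcer`, `SyncedEnforcer`, and `CachedEnforcer` all satisfy `IEnforcer` (the compile-time assertions above guarantee it), callers can stay agnostic about which concrete enforcer they are handed, and a plain, synced, or cached enforcer can be swapped in without touching call sites. A small sketch, assuming the `casbinv2` import alias used elsewhere in this PR:

```go
package authz

import casbinv2 "github.com/casbin/casbin/v2"

// can works with any enforcer flavour behind the IEnforcer interface.
func can(e casbinv2.IEnforcer, sub, obj, act string) bool {
	ok, err := e.Enforce(sub, obj, act)
	return err == nil && ok
}
```
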
+type IEnforcer interface { + /* Enforcer API */ + InitWithFile(modelPath string, policyPath string) error + InitWithAdapter(modelPath string, adapter persist.Adapter) error + InitWithModelAndAdapter(m model.Model, adapter persist.Adapter) error + LoadModel() error + GetModel() model.Model + SetModel(m model.Model) + GetAdapter() persist.Adapter + SetAdapter(adapter persist.Adapter) + SetWatcher(watcher persist.Watcher) error + GetRoleManager() rbac.RoleManager + SetRoleManager(rm rbac.RoleManager) + SetEffector(eft effector.Effector) + ClearPolicy() + LoadPolicy() error + LoadFilteredPolicy(filter interface{}) error + LoadIncrementalFilteredPolicy(filter interface{}) error + IsFiltered() bool + SavePolicy() error + EnableEnforce(enable bool) + EnableLog(enable bool) + EnableAutoNotifyWatcher(enable bool) + EnableAutoSave(autoSave bool) + EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) + BuildRoleLinks() error + Enforce(rvals ...interface{}) (bool, error) + EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) + EnforceEx(rvals ...interface{}) (bool, []string, error) + EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) + BatchEnforce(requests [][]interface{}) ([]bool, error) + BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) + + /* RBAC API */ + GetRolesForUser(name string, domain ...string) ([]string, error) + GetUsersForRole(name string, domain ...string) ([]string, error) + HasRoleForUser(name string, role string, domain ...string) (bool, error) + AddRoleForUser(user string, role string, domain ...string) (bool, error) + AddPermissionForUser(user string, permission ...string) (bool, error) + AddPermissionsForUser(user string, permissions ...[]string) (bool, error) + DeletePermissionForUser(user string, permission ...string) (bool, error) + DeletePermissionsForUser(user string) (bool, error) + GetPermissionsForUser(user string, domain ...string) ([][]string, error) + HasPermissionForUser(user string, permission ...string) (bool, error) + GetImplicitRolesForUser(name string, domain ...string) ([]string, error) + GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) + GetImplicitUsersForPermission(permission ...string) ([]string, error) + DeleteRoleForUser(user string, role string, domain ...string) (bool, error) + DeleteRolesForUser(user string, domain ...string) (bool, error) + DeleteUser(user string) (bool, error) + DeleteRole(role string) (bool, error) + DeletePermission(permission ...string) (bool, error) + + /* RBAC API with domains*/ + GetUsersForRoleInDomain(name string, domain string) []string + GetRolesForUserInDomain(name string, domain string) []string + GetPermissionsForUserInDomain(user string, domain string) [][]string + AddRoleForUserInDomain(user string, role string, domain string) (bool, error) + DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) + GetAllUsersByDomain(domain string) ([]string, error) + DeleteRolesForUserInDomain(user string, domain string) (bool, error) + DeleteAllUsersByDomain(domain string) (bool, error) + DeleteDomains(domains ...string) (bool, error) + GetAllDomains() ([]string, error) + GetAllRolesByDomain(domain string) ([]string, error) + + /* Management API */ + GetAllSubjects() ([]string, error) + GetAllNamedSubjects(ptype string) ([]string, error) + GetAllObjects() ([]string, error) + GetAllNamedObjects(ptype string) ([]string, error) + GetAllActions() ([]string, error) + GetAllNamedActions(ptype string) 
([]string, error) + GetAllRoles() ([]string, error) + GetAllNamedRoles(ptype string) ([]string, error) + GetPolicy() ([][]string, error) + GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) + GetNamedPolicy(ptype string) ([][]string, error) + GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) + GetGroupingPolicy() ([][]string, error) + GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) + GetNamedGroupingPolicy(ptype string) ([][]string, error) + GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) + HasPolicy(params ...interface{}) (bool, error) + HasNamedPolicy(ptype string, params ...interface{}) (bool, error) + AddPolicy(params ...interface{}) (bool, error) + AddPolicies(rules [][]string) (bool, error) + AddNamedPolicy(ptype string, params ...interface{}) (bool, error) + AddNamedPolicies(ptype string, rules [][]string) (bool, error) + AddPoliciesEx(rules [][]string) (bool, error) + AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) + RemovePolicy(params ...interface{}) (bool, error) + RemovePolicies(rules [][]string) (bool, error) + RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) + RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) + RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) + RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) + HasGroupingPolicy(params ...interface{}) (bool, error) + HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) + AddGroupingPolicy(params ...interface{}) (bool, error) + AddGroupingPolicies(rules [][]string) (bool, error) + AddGroupingPoliciesEx(rules [][]string) (bool, error) + AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) + AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) + AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) + RemoveGroupingPolicy(params ...interface{}) (bool, error) + RemoveGroupingPolicies(rules [][]string) (bool, error) + RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) + RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) + RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) + RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) + AddFunction(name string, function govaluate.ExpressionFunction) + + UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) + UpdatePolicies(oldPolicies [][]string, newPolicies [][]string) (bool, error) + UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) + + UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) + UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) + UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) + UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) + + /* Management API with autoNotifyWatcher disabled */ + SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) + SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) + SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) + SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) + 
SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) + SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) + SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) + SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) +} + +var _ IDistributedEnforcer = &DistributedEnforcer{} + +// IDistributedEnforcer defines dispatcher enforcer. +type IDistributedEnforcer interface { + IEnforcer + SetDispatcher(dispatcher persist.Dispatcher) + /* Management API for DistributedEnforcer*/ + AddPoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) + RemovePoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) + RemoveFilteredPolicySelf(shouldPersist func() bool, sec string, ptype string, fieldIndex int, fieldValues ...string) (affected [][]string, err error) + ClearPolicySelf(shouldPersist func() bool) error + UpdatePolicySelf(shouldPersist func() bool, sec string, ptype string, oldRule, newRule []string) (affected bool, err error) + UpdatePoliciesSelf(shouldPersist func() bool, sec string, ptype string, oldRules, newRules [][]string) (affected bool, err error) + UpdateFilteredPoliciesSelf(shouldPersist func() bool, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_synced.go b/vendor/github.com/casbin/casbin/v2/enforcer_synced.go new file mode 100644 index 0000000000..ae2fc7c4d2 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_synced.go @@ -0,0 +1,650 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/casbin/govaluate" + + "github.com/casbin/casbin/v2/persist" +) + +// SyncedEnforcer wraps Enforcer and provides synchronized access. +type SyncedEnforcer struct { + *Enforcer + m sync.RWMutex + stopAutoLoad chan struct{} + autoLoadRunning int32 +} + +// NewSyncedEnforcer creates a synchronized enforcer via file or DB. +func NewSyncedEnforcer(params ...interface{}) (*SyncedEnforcer, error) { + e := &SyncedEnforcer{} + var err error + e.Enforcer, err = NewEnforcer(params...) + if err != nil { + return nil, err + } + + e.stopAutoLoad = make(chan struct{}, 1) + e.autoLoadRunning = 0 + return e, nil +} + +// GetLock return the private RWMutex lock. +func (e *SyncedEnforcer) GetLock() *sync.RWMutex { + return &e.m +} + +// IsAutoLoadingRunning check if SyncedEnforcer is auto loading policies. +func (e *SyncedEnforcer) IsAutoLoadingRunning() bool { + return atomic.LoadInt32(&(e.autoLoadRunning)) != 0 +} + +// StartAutoLoadPolicy starts a go routine that will every specified duration call LoadPolicy. 
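
The auto-load loop implemented below re-reads the policy from the adapter on a fixed interval, letting one process pick up rules written by another; the CompareAndSwap on `autoLoadRunning` ensures repeated calls do not stack tickers. A usage sketch with placeholder paths:

```go
package main

import (
	"time"

	casbinv2 "github.com/casbin/casbin/v2"
)

func main() {
	e, err := casbinv2.NewSyncedEnforcer("model.conf", "policy.csv")
	if err != nil {
		panic(err)
	}

	// Re-read the policy from the adapter every 10 seconds; a second call
	// while the loop is running is a no-op.
	e.StartAutoLoadPolicy(10 * time.Second)
	defer e.StopAutoLoadPolicy()

	// ... serve requests: Enforce takes the read lock, so reloads and
	// decisions do not race.
}
```
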
+func (e *SyncedEnforcer) StartAutoLoadPolicy(d time.Duration) { + // Don't start another goroutine if there is already one running + if !atomic.CompareAndSwapInt32(&e.autoLoadRunning, 0, 1) { + return + } + + ticker := time.NewTicker(d) + go func() { + defer func() { + ticker.Stop() + atomic.StoreInt32(&(e.autoLoadRunning), int32(0)) + }() + n := 1 + for { + select { + case <-ticker.C: + // error intentionally ignored + _ = e.LoadPolicy() + // Uncomment this line to see when the policy is loaded. + // log.Print("Load policy for time: ", n) + n++ + case <-e.stopAutoLoad: + return + } + } + }() +} + +// StopAutoLoadPolicy causes the go routine to exit. +func (e *SyncedEnforcer) StopAutoLoadPolicy() { + if e.IsAutoLoadingRunning() { + e.stopAutoLoad <- struct{}{} + } +} + +// SetWatcher sets the current watcher. +func (e *SyncedEnforcer) SetWatcher(watcher persist.Watcher) error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SetWatcher(watcher) +} + +// LoadModel reloads the model from the model CONF file. +func (e *SyncedEnforcer) LoadModel() error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.LoadModel() +} + +// ClearPolicy clears all policy. +func (e *SyncedEnforcer) ClearPolicy() { + e.m.Lock() + defer e.m.Unlock() + e.Enforcer.ClearPolicy() +} + +// LoadPolicy reloads the policy from file/database. +func (e *SyncedEnforcer) LoadPolicy() error { + e.m.RLock() + newModel, err := e.loadPolicyFromAdapter(e.model) + e.m.RUnlock() + if err != nil { + return err + } + e.m.Lock() + err = e.applyModifiedModel(newModel) + e.m.Unlock() + if err != nil { + return err + } + return nil +} + +// LoadFilteredPolicy reloads a filtered policy from file/database. +func (e *SyncedEnforcer) LoadFilteredPolicy(filter interface{}) error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.LoadFilteredPolicy(filter) +} + +// LoadIncrementalFilteredPolicy reloads a filtered policy from file/database. +func (e *SyncedEnforcer) LoadIncrementalFilteredPolicy(filter interface{}) error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.LoadIncrementalFilteredPolicy(filter) +} + +// SavePolicy saves the current policy (usually after changed with Casbin API) back to file/database. +func (e *SyncedEnforcer) SavePolicy() error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SavePolicy() +} + +// BuildRoleLinks manually rebuild the role inheritance relations. +func (e *SyncedEnforcer) BuildRoleLinks() error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.BuildRoleLinks() +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +func (e *SyncedEnforcer) Enforce(rvals ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.Enforce(rvals...) +} + +// EnforceWithMatcher use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". +func (e *SyncedEnforcer) EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.EnforceWithMatcher(matcher, rvals...) +} + +// EnforceEx explain enforcement by informing matched rules. +func (e *SyncedEnforcer) EnforceEx(rvals ...interface{}) (bool, []string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.EnforceEx(rvals...) 
+} + +// EnforceExWithMatcher use a custom matcher and explain enforcement by informing matched rules. +func (e *SyncedEnforcer) EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.EnforceExWithMatcher(matcher, rvals...) +} + +// BatchEnforce enforce in batches. +func (e *SyncedEnforcer) BatchEnforce(requests [][]interface{}) ([]bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.BatchEnforce(requests) +} + +// BatchEnforceWithMatcher enforce with matcher in batches. +func (e *SyncedEnforcer) BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.BatchEnforceWithMatcher(matcher, requests) +} + +// GetAllSubjects gets the list of subjects that show up in the current policy. +func (e *SyncedEnforcer) GetAllSubjects() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllSubjects() +} + +// GetAllNamedSubjects gets the list of subjects that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedSubjects(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedSubjects(ptype) +} + +// GetAllObjects gets the list of objects that show up in the current policy. +func (e *SyncedEnforcer) GetAllObjects() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllObjects() +} + +// GetAllNamedObjects gets the list of objects that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedObjects(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedObjects(ptype) +} + +// GetAllActions gets the list of actions that show up in the current policy. +func (e *SyncedEnforcer) GetAllActions() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllActions() +} + +// GetAllNamedActions gets the list of actions that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedActions(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedActions(ptype) +} + +// GetAllRoles gets the list of roles that show up in the current policy. +func (e *SyncedEnforcer) GetAllRoles() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllRoles() +} + +// GetAllNamedRoles gets the list of roles that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedRoles(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedRoles(ptype) +} + +// GetPolicy gets all the authorization rules in the policy. +func (e *SyncedEnforcer) GetPolicy() ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetPolicy() +} + +// GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. +func (e *SyncedEnforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredPolicy(fieldIndex, fieldValues...) +} + +// GetNamedPolicy gets all the authorization rules in the named policy. +func (e *SyncedEnforcer) GetNamedPolicy(ptype string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedPolicy(ptype) +} + +// GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. 
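
Every read accessor here takes the shared `RWMutex` read lock, so concurrent `Enforce` calls proceed in parallel while mutations (the `Add*`/`Remove*` methods that follow) serialize behind the write lock. A sketch of the intended pattern, assuming the `sync` import and the `*SyncedEnforcer` from the earlier sketch:

```go
func demo(e *casbinv2.SyncedEnforcer) {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Readers share the RLock and run concurrently.
			_, _ = e.Enforce("alice", "data1", "read")
		}()
	}
	// AddPolicy takes the exclusive lock and waits for in-flight readers.
	_, _ = e.AddPolicy("bob", "data2", "write")
	wg.Wait()
}
```
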
+func (e *SyncedEnforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredNamedPolicy(ptype, fieldIndex, fieldValues...) +} + +// GetGroupingPolicy gets all the role inheritance rules in the policy. +func (e *SyncedEnforcer) GetGroupingPolicy() ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetGroupingPolicy() +} + +// GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *SyncedEnforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredGroupingPolicy(fieldIndex, fieldValues...) +} + +// GetNamedGroupingPolicy gets all the role inheritance rules in the policy. +func (e *SyncedEnforcer) GetNamedGroupingPolicy(ptype string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedGroupingPolicy(ptype) +} + +// GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *SyncedEnforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredNamedGroupingPolicy(ptype, fieldIndex, fieldValues...) +} + +// HasPolicy determines whether an authorization rule exists. +func (e *SyncedEnforcer) HasPolicy(params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasPolicy(params...) +} + +// HasNamedPolicy determines whether a named authorization rule exists. +func (e *SyncedEnforcer) HasNamedPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasNamedPolicy(ptype, params...) +} + +// AddPolicy adds an authorization rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddPolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPolicy(params...) +} + +// AddPolicies adds authorization rules to the current policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding rule by adding the new rule. +func (e *SyncedEnforcer) AddPolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPolicies(rules) +} + +// AddPoliciesEx adds authorization rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddPoliciesEx(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPoliciesEx(rules) +} + +// AddNamedPolicy adds an authorization rule to the current named policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedPolicy(ptype, params...) 
+} + +// AddNamedPolicies adds authorization rules to the current named policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding by adding the new rule. +func (e *SyncedEnforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedPolicies(ptype, rules) +} + +// AddNamedPoliciesEx adds authorization rules to the current named policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedPoliciesEx(ptype, rules) +} + +// RemovePolicy removes an authorization rule from the current policy. +func (e *SyncedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemovePolicy(params...) +} + +// UpdatePolicy updates an authorization rule from the current policy. +func (e *SyncedEnforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdatePolicy(oldPolicy, newPolicy) +} + +func (e *SyncedEnforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedPolicy(ptype, p1, p2) +} + +// UpdatePolicies updates authorization rules from the current policies. +func (e *SyncedEnforcer) UpdatePolicies(oldPolices [][]string, newPolicies [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdatePolicies(oldPolices, newPolicies) +} + +func (e *SyncedEnforcer) UpdateNamedPolicies(ptype string, p1 [][]string, p2 [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedPolicies(ptype, p1, p2) +} + +func (e *SyncedEnforcer) UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateFilteredPolicies(newPolicies, fieldIndex, fieldValues...) +} + +func (e *SyncedEnforcer) UpdateFilteredNamedPolicies(ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateFilteredNamedPolicies(ptype, newPolicies, fieldIndex, fieldValues...) +} + +// RemovePolicies removes authorization rules from the current policy. +func (e *SyncedEnforcer) RemovePolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemovePolicies(rules) +} + +// RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredPolicy(fieldIndex, fieldValues...) +} + +// RemoveNamedPolicy removes an authorization rule from the current named policy. +func (e *SyncedEnforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedPolicy(ptype, params...) +} + +// RemoveNamedPolicies removes authorization rules from the current named policy. 
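
`UpdatePolicy` swaps one rule for another under a single write-lock acquisition, avoiding the window a separate remove-then-add pair would leave open; `RemoveFilteredPolicy` deletes by field position rather than exact match. A sketch, again assuming `e` is the enforcer from the earlier sketches:

```go
// Atomically replace alice's read grant with a write grant.
updated, err := e.UpdatePolicy(
	[]string{"alice", "data1", "read"},
	[]string{"alice", "data1", "write"},
)

// Remove every p rule whose first field (the subject) is "bob".
removed, err2 := e.RemoveFilteredPolicy(0, "bob")
fmt.Println(updated, err, removed, err2)
```
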
+func (e *SyncedEnforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedPolicies(ptype, rules) +} + +// RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredNamedPolicy(ptype, fieldIndex, fieldValues...) +} + +// HasGroupingPolicy determines whether a role inheritance rule exists. +func (e *SyncedEnforcer) HasGroupingPolicy(params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasGroupingPolicy(params...) +} + +// HasNamedGroupingPolicy determines whether a named role inheritance rule exists. +func (e *SyncedEnforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasNamedGroupingPolicy(ptype, params...) +} + +// AddGroupingPolicy adds a role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddGroupingPolicy(params...) +} + +// AddGroupingPolicies adds role inheritance rulea to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *SyncedEnforcer) AddGroupingPolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddGroupingPolicies(rules) +} + +// AddGroupingPoliciesEx adds role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddGroupingPoliciesEx(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddGroupingPoliciesEx(rules) +} + +// AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedGroupingPolicy(ptype, params...) +} + +// AddNamedGroupingPolicies adds named role inheritance rules to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *SyncedEnforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedGroupingPolicies(ptype, rules) +} + +// AddNamedGroupingPoliciesEx adds named role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. 
+// But unlike AddNamedGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedGroupingPoliciesEx(ptype, rules) +} + +// RemoveGroupingPolicy removes a role inheritance rule from the current policy. +func (e *SyncedEnforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveGroupingPolicy(params...) +} + +// RemoveGroupingPolicies removes role inheritance rules from the current policy. +func (e *SyncedEnforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveGroupingPolicies(rules) +} + +// RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredGroupingPolicy(fieldIndex, fieldValues...) +} + +// RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. +func (e *SyncedEnforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedGroupingPolicy(ptype, params...) +} + +// RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. +func (e *SyncedEnforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedGroupingPolicies(ptype, rules) +} + +func (e *SyncedEnforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateGroupingPolicy(oldRule, newRule) +} + +func (e *SyncedEnforcer) UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateGroupingPolicies(oldRules, newRules) +} + +func (e *SyncedEnforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedGroupingPolicy(ptype, oldRule, newRule) +} + +func (e *SyncedEnforcer) UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedGroupingPolicies(ptype, oldRules, newRules) +} + +// RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredNamedGroupingPolicy(ptype, fieldIndex, fieldValues...) +} + +// AddFunction adds a customized function. 
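
`AddFunction`, implemented next, registers a govaluate function that matchers can call by name. The function name and model line below are illustrative, and `e`, `errors`, and `strings` are assumed from the surrounding sketches:

```go
// Usable from a matcher as: m = keyPrefix(r.obj, p.obj) && r.sub == p.sub
// The signature is fixed by govaluate.ExpressionFunction.
e.AddFunction("keyPrefix", func(args ...interface{}) (interface{}, error) {
	if len(args) != 2 {
		return nil, errors.New("keyPrefix expects exactly two arguments")
	}
	obj, ok1 := args[0].(string)
	prefix, ok2 := args[1].(string)
	if !ok1 || !ok2 {
		return nil, errors.New("keyPrefix expects two string arguments")
	}
	return strings.HasPrefix(obj, prefix), nil
})
```
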
+func (e *SyncedEnforcer) AddFunction(name string, function govaluate.ExpressionFunction) { + e.m.Lock() + defer e.m.Unlock() + e.Enforcer.AddFunction(name, function) +} + +func (e *SyncedEnforcer) SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfAddPolicy(sec, ptype, rule) +} + +func (e *SyncedEnforcer) SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfAddPolicies(sec, ptype, rules) +} + +func (e *SyncedEnforcer) SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfAddPoliciesEx(sec, ptype, rules) +} + +func (e *SyncedEnforcer) SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfRemovePolicy(sec, ptype, rule) +} + +func (e *SyncedEnforcer) SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfRemovePolicies(sec, ptype, rules) +} + +func (e *SyncedEnforcer) SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfRemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) +} + +func (e *SyncedEnforcer) SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfUpdatePolicy(sec, ptype, oldRule, newRule) +} + +func (e *SyncedEnforcer) SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfUpdatePolicies(sec, ptype, oldRules, newRules) +} diff --git a/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go b/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go new file mode 100644 index 0000000000..2f358b3725 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go @@ -0,0 +1,30 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import "errors" + +// Global errors for rbac defined here. +var ( + ErrNameNotFound = errors.New("error: name does not exist") + ErrDomainParameter = errors.New("error: domain should be 1 parameter") + ErrLinkNotFound = errors.New("error: link between name1 and name2 does not exist") + ErrUseDomainParameter = errors.New("error: useDomain should be 1 parameter") + ErrInvalidFieldValuesParameter = errors.New("fieldValues requires at least one parameter") + + // GetAllowedObjectConditions errors. 
+ ErrObjCondition = errors.New("need to meet the prefix required by the object condition") + ErrEmptyCondition = errors.New("GetAllowedObjectConditions have an empty condition") +) diff --git a/vendor/github.com/casbin/casbin/v2/frontend.go b/vendor/github.com/casbin/casbin/v2/frontend.go new file mode 100644 index 0000000000..101a23a5db --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/frontend.go @@ -0,0 +1,57 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "bytes" + "encoding/json" +) + +func CasbinJsGetPermissionForUser(e IEnforcer, user string) (string, error) { + model := e.GetModel() + m := map[string]interface{}{} + + m["m"] = model.ToText() + + pRules := [][]string{} + for ptype := range model["p"] { + policies, err := model.GetPolicy("p", ptype) + if err != nil { + return "", err + } + for _, rules := range policies { + pRules = append(pRules, append([]string{ptype}, rules...)) + } + } + m["p"] = pRules + + gRules := [][]string{} + for ptype := range model["g"] { + policies, err := model.GetPolicy("g", ptype) + if err != nil { + return "", err + } + for _, rules := range policies { + gRules = append(gRules, append([]string{ptype}, rules...)) + } + } + m["g"] = gRules + + result := bytes.NewBuffer([]byte{}) + encoder := json.NewEncoder(result) + encoder.SetEscapeHTML(false) + err := encoder.Encode(m) + return result.String(), err +} diff --git a/vendor/github.com/casbin/casbin/v2/frontend_old.go b/vendor/github.com/casbin/casbin/v2/frontend_old.go new file mode 100644 index 0000000000..139b164fba --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/frontend_old.go @@ -0,0 +1,30 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/vendor/github.com/casbin/casbin/v2/frontend_old.go b/vendor/github.com/casbin/casbin/v2/frontend_old.go new file mode 100644 index 0000000000..139b164fba --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/frontend_old.go @@ -0,0 +1,30 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import "encoding/json" + +func CasbinJsGetPermissionForUserOld(e IEnforcer, user string) ([]byte, error) { + policy, err := e.GetImplicitPermissionsForUser(user) + if err != nil { + return nil, err + } + permission := make(map[string][]string) + for i := 0; i < len(policy); i++ { + permission[policy[i][2]] = append(permission[policy[i][2]], policy[i][1]) + } + b, _ := json.Marshal(permission) + return b, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/internal_api.go b/vendor/github.com/casbin/casbin/v2/internal_api.go new file mode 100644 index 0000000000..cd329016c0 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/internal_api.go @@ -0,0 +1,497 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "fmt" + + Err "github.com/casbin/casbin/v2/errors" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +const ( + notImplemented = "not implemented" +) + +func (e *Enforcer) shouldPersist() bool { + return e.adapter != nil && e.autoSave +} + +func (e *Enforcer) shouldNotify() bool { + return e.watcher != nil && e.autoNotifyWatcher +} + +// addPolicy adds a rule to the current policy. +func (e *Enforcer) addPolicyWithoutNotify(sec string, ptype string, rule []string) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.AddPolicies(sec, ptype, [][]string{rule}) + } + + hasPolicy, err := e.model.HasPolicy(sec, ptype, rule) + if hasPolicy || err != nil { + return hasPolicy, err + } + + if e.shouldPersist() { + if err = e.adapter.AddPolicy(sec, ptype, rule); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + err = e.model.AddPolicy(sec, ptype, rule) + if err != nil { + return false, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{rule}) + if err != nil { + return true, err + } + } + + return true, nil +} + +// addPoliciesWithoutNotify adds rules to the current policy without notifying the watcher. +// If autoRemoveRepeat == true, existing rules are automatically filtered. +// Otherwise, false is returned directly.
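As an aside before addPoliciesWithoutNotify's implementation, which continues directly below: these internal helpers back the public Add*/Remove* API, checking the model for duplicates, persisting through the adapter only when autoSave is on, and rebuilding role links incrementally for g rules. A hedged end-to-end sketch of that observable behavior, using only APIs that appear in this diff (the model text is my own):

```go
package main

import (
	"fmt"
	"log"

	"github.com/casbin/casbin/v2"
	"github.com/casbin/casbin/v2/model"
)

const text = `
[request_definition]
r = sub, obj, act

[policy_definition]
p = sub, obj, act

[role_definition]
g = _, _

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act
`

func main() {
	m, err := model.NewModelFromString(text)
	if err != nil {
		log.Fatal(err)
	}
	// No adapter is passed, so shouldPersist() stays false and every
	// mutation stays in the in-memory model.
	e, err := casbin.NewEnforcer(m)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := e.AddPolicy("admin", "data1", "read"); err != nil {
		log.Fatal(err)
	}
	// A "g" rule goes through BuildIncrementalRoleLinks(PolicyAdd, ...),
	// so the new role edge is visible to the very next Enforce call.
	if _, err := e.AddGroupingPolicy("alice", "admin"); err != nil {
		log.Fatal(err)
	}

	ok, _ := e.Enforce("alice", "data1", "read")
	fmt.Println(ok) // true: alice inherits admin's permission
}
```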
+func (e *Enforcer) addPoliciesWithoutNotify(sec string, ptype string, rules [][]string, autoRemoveRepeat bool) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.AddPolicies(sec, ptype, rules) + } + + if !autoRemoveRepeat { + hasPolicies, err := e.model.HasPolicies(sec, ptype, rules) + if hasPolicies || err != nil { + return false, err + } + } + + if e.shouldPersist() { + if err := e.adapter.(persist.BatchAdapter).AddPolicies(sec, ptype, rules); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + err := e.model.AddPolicies(sec, ptype, rules) + if err != nil { + return false, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, rules) + if err != nil { + return true, err + } + + err = e.BuildIncrementalConditionalRoleLinks(model.PolicyAdd, ptype, rules) + if err != nil { + return true, err + } + } + + return true, nil +} + +// removePolicy removes a rule from the current policy. +func (e *Enforcer) removePolicyWithoutNotify(sec string, ptype string, rule []string) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.RemovePolicies(sec, ptype, [][]string{rule}) + } + + if e.shouldPersist() { + if err := e.adapter.RemovePolicy(sec, ptype, rule); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + ruleRemoved, err := e.model.RemovePolicy(sec, ptype, rule) + if !ruleRemoved || err != nil { + return ruleRemoved, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{rule}) + if err != nil { + return ruleRemoved, err + } + } + + return ruleRemoved, nil +} + +func (e *Enforcer) updatePolicyWithoutNotify(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.UpdatePolicy(sec, ptype, oldRule, newRule) + } + + if e.shouldPersist() { + if err := e.adapter.(persist.UpdatableAdapter).UpdatePolicy(sec, ptype, oldRule, newRule); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + ruleUpdated, err := e.model.UpdatePolicy(sec, ptype, oldRule, newRule) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{oldRule}) // remove the old rule + if err != nil { + return ruleUpdated, err + } + err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{newRule}) // add the new rule + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +func (e *Enforcer) updatePoliciesWithoutNotify(sec string, ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + if len(newRules) != len(oldRules) { + return false, fmt.Errorf("the length of oldRules should be equal to the length of newRules, but got the length of oldRules is %d, the length of newRules is %d", len(oldRules), len(newRules)) + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.UpdatePolicies(sec, ptype, oldRules, newRules) + } + + if e.shouldPersist() { + if err := e.adapter.(persist.UpdatableAdapter).UpdatePolicies(sec, ptype, oldRules, newRules); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + ruleUpdated, err := e.model.UpdatePolicies(sec, ptype, oldRules, newRules) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := 
e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rules + if err != nil { + return ruleUpdated, err + } + err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rules + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +// removePolicies removes rules from the current policy. +func (e *Enforcer) removePoliciesWithoutNotify(sec string, ptype string, rules [][]string) (bool, error) { + if hasPolicies, err := e.model.HasPolicies(sec, ptype, rules); !hasPolicies || err != nil { + return hasPolicies, err + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.RemovePolicies(sec, ptype, rules) + } + + if e.shouldPersist() { + if err := e.adapter.(persist.BatchAdapter).RemovePolicies(sec, ptype, rules); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + rulesRemoved, err := e.model.RemovePolicies(sec, ptype, rules) + if !rulesRemoved || err != nil { + return rulesRemoved, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, rules) + if err != nil { + return rulesRemoved, err + } + } + return rulesRemoved, nil +} + +// removeFilteredPolicy removes rules based on field filters from the current policy. +func (e *Enforcer) removeFilteredPolicyWithoutNotify(sec string, ptype string, fieldIndex int, fieldValues []string) (bool, error) { + if len(fieldValues) == 0 { + return false, Err.ErrInvalidFieldValuesParameter + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + } + + if e.shouldPersist() { + if err := e.adapter.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + ruleRemoved, effects, err := e.model.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) 
+ if !ruleRemoved || err != nil { + return ruleRemoved, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, effects) + if err != nil { + return ruleRemoved, err + } + } + + return ruleRemoved, nil +} + +func (e *Enforcer) updateFilteredPoliciesWithoutNotify(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { + var ( + oldRules [][]string + err error + ) + + if _, err = e.model.GetAssertion(sec, ptype); err != nil { + return oldRules, err + } + + if e.shouldPersist() { + if oldRules, err = e.adapter.(persist.UpdatableAdapter).UpdateFilteredPolicies(sec, ptype, newRules, fieldIndex, fieldValues...); err != nil { + if err.Error() != notImplemented { + return nil, err + } + } + // For compatibility, because some adapters return oldRules containing ptype, see https://github.com/casbin/xorm-adapter/issues/49 + for i, oldRule := range oldRules { + if len(oldRules[i]) == len(e.model[sec][ptype].Tokens)+1 { + oldRules[i] = oldRule[1:] + } + } + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return oldRules, e.dispatcher.UpdateFilteredPolicies(sec, ptype, oldRules, newRules) + } + + ruleChanged, err := e.model.RemovePolicies(sec, ptype, oldRules) + if err != nil { + return oldRules, err + } + err = e.model.AddPolicies(sec, ptype, newRules) + if err != nil { + return oldRules, err + } + ruleChanged = ruleChanged && len(newRules) != 0 + if !ruleChanged { + return make([][]string, 0), nil + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rules + if err != nil { + return oldRules, err + } + err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rules + if err != nil { + return oldRules, err + } + } + + return oldRules, nil +} + +// addPolicy adds a rule to the current policy. +func (e *Enforcer) addPolicy(sec string, ptype string, rule []string) (bool, error) { + ok, err := e.addPolicyWithoutNotify(sec, ptype, rule) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForAddPolicy(sec, ptype, rule...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// addPolicies adds rules to the current policy. +// If autoRemoveRepeat == true, existing rules are automatically filtered +// Otherwise, false is returned directly. +func (e *Enforcer) addPolicies(sec string, ptype string, rules [][]string, autoRemoveRepeat bool) (bool, error) { + ok, err := e.addPoliciesWithoutNotify(sec, ptype, rules, autoRemoveRepeat) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForAddPolicies(sec, ptype, rules...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// removePolicy removes a rule from the current policy. +func (e *Enforcer) removePolicy(sec string, ptype string, rule []string) (bool, error) { + ok, err := e.removePolicyWithoutNotify(sec, ptype, rule) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForRemovePolicy(sec, ptype, rule...) 
+ } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) updatePolicy(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { + ok, err := e.updatePolicyWithoutNotify(sec, ptype, oldRule, newRule) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { + err = watcher.UpdateForUpdatePolicy(sec, ptype, oldRule, newRule) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) updatePolicies(sec string, ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + ok, err := e.updatePoliciesWithoutNotify(sec, ptype, oldRules, newRules) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { + err = watcher.UpdateForUpdatePolicies(sec, ptype, oldRules, newRules) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// removePolicies removes rules from the current policy. +func (e *Enforcer) removePolicies(sec string, ptype string, rules [][]string) (bool, error) { + ok, err := e.removePoliciesWithoutNotify(sec, ptype, rules) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForRemovePolicies(sec, ptype, rules...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// removeFilteredPolicy removes rules based on field filters from the current policy. +func (e *Enforcer) removeFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues []string) (bool, error) { + ok, err := e.removeFilteredPolicyWithoutNotify(sec, ptype, fieldIndex, fieldValues) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForRemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) updateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + oldRules, err := e.updateFilteredPoliciesWithoutNotify(sec, ptype, newRules, fieldIndex, fieldValues...) + ok := len(oldRules) != 0 + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { + err = watcher.UpdateForUpdatePolicies(sec, ptype, oldRules, newRules) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) GetFieldIndex(ptype string, field string) (int, error) { + return e.model.GetFieldIndex(ptype, field) +} + +func (e *Enforcer) SetFieldIndex(ptype string, field string, index int) { + assertion := e.model["p"][ptype] + assertion.FieldIndexMap[field] = index +} diff --git a/vendor/github.com/casbin/casbin/v2/log/default_logger.go b/vendor/github.com/casbin/casbin/v2/log/default_logger.go new file mode 100644 index 0000000000..9994f390bf --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/log/default_logger.go @@ -0,0 +1,104 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "fmt" + "log" + "strings" +) + +// DefaultLogger is the implementation for a Logger using golang log. +type DefaultLogger struct { + enabled bool +} + +func (l *DefaultLogger) EnableLog(enable bool) { + l.enabled = enable +} + +func (l *DefaultLogger) IsEnabled() bool { + return l.enabled +} + +func (l *DefaultLogger) LogModel(model [][]string) { + if !l.enabled { + return + } + var str strings.Builder + str.WriteString("Model: ") + for _, v := range model { + str.WriteString(fmt.Sprintf("%v\n", v)) + } + + log.Println(str.String()) +} + +func (l *DefaultLogger) LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) { + if !l.enabled { + return + } + + var reqStr strings.Builder + reqStr.WriteString("Request: ") + for i, rval := range request { + if i != len(request)-1 { + reqStr.WriteString(fmt.Sprintf("%v, ", rval)) + } else { + reqStr.WriteString(fmt.Sprintf("%v", rval)) + } + } + reqStr.WriteString(fmt.Sprintf(" ---> %t\n", result)) + + reqStr.WriteString("Hit Policy: ") + for i, pval := range explains { + if i != len(explains)-1 { + reqStr.WriteString(fmt.Sprintf("%v, ", pval)) + } else { + reqStr.WriteString(fmt.Sprintf("%v \n", pval)) + } + } + + log.Println(reqStr.String()) +} + +func (l *DefaultLogger) LogPolicy(policy map[string][][]string) { + if !l.enabled { + return + } + + var str strings.Builder + str.WriteString("Policy: ") + for k, v := range policy { + str.WriteString(fmt.Sprintf("%s : %v\n", k, v)) + } + + log.Println(str.String()) +} + +func (l *DefaultLogger) LogRole(roles []string) { + if !l.enabled { + return + } + + log.Println("Roles: ", strings.Join(roles, "\n")) +} + +func (l *DefaultLogger) LogError(err error, msg ...string) { + if !l.enabled { + return + } + log.Println(msg, err) +} diff --git a/vendor/github.com/casbin/casbin/v2/log/log_util.go b/vendor/github.com/casbin/casbin/v2/log/log_util.go new file mode 100644 index 0000000000..7edabf899b --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/log/log_util.go @@ -0,0 +1,52 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +var logger Logger = &DefaultLogger{} + +// SetLogger sets the current logger. +func SetLogger(l Logger) { + logger = l +} + +// GetLogger returns the current logger. +func GetLogger() Logger { + return logger +} + +// LogModel logs the model information. 
+func LogModel(model [][]string) { + logger.LogModel(model) +} + +// LogEnforce logs the enforcer information. +func LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) { + logger.LogEnforce(matcher, request, result, explains) +} + +// LogRole logs info related to roles. +func LogRole(roles []string) { + logger.LogRole(roles) +} + +// LogPolicy logs the policy information. +func LogPolicy(policy map[string][][]string) { + logger.LogPolicy(policy) +} + +// LogError logs the error information. +func LogError(err error, msg ...string) { + logger.LogError(err, msg...) +} diff --git a/vendor/github.com/casbin/casbin/v2/log/logger.go b/vendor/github.com/casbin/casbin/v2/log/logger.go new file mode 100644 index 0000000000..8982cae6f5 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/log/logger.go @@ -0,0 +1,41 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +//go:generate mockgen -destination=./mocks/mock_logger.go -package=mocks github.com/casbin/casbin/v2/log Logger + +// Logger is the logging interface. +type Logger interface { + // EnableLog controls whether to print messages. + EnableLog(bool) + + // IsEnabled returns whether the logger is enabled. + IsEnabled() bool + + // LogModel logs info related to the model. + LogModel(model [][]string) + + // LogEnforce logs info related to enforcement. + LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) + + // LogRole logs info related to roles. + LogRole(roles []string) + + // LogPolicy logs info related to the policy. + LogPolicy(policy map[string][][]string) + + // LogError logs info related to errors. + LogError(err error, msg ...string) +} diff --git a/vendor/github.com/casbin/casbin/v2/management_api.go b/vendor/github.com/casbin/casbin/v2/management_api.go new file mode 100644 index 0000000000..6641f83422 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/management_api.go @@ -0,0 +1,500 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "errors" + "fmt" + "strings" + + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/util" + "github.com/casbin/govaluate" +) + +// GetAllSubjects gets the list of subjects that show up in the current policy.
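That completes the vendored log package; the management API (GetAllSubjects follows right after this aside) is next. A small sketch of wiring the logger up, using only DefaultLogger, EnableLog and SetLogger from the files above; the enabling sequence is my own:

```go
package main

import (
	casbinlog "github.com/casbin/casbin/v2/log"
)

func main() {
	// DefaultLogger writes through the standard library's log package and is
	// disabled until EnableLog(true). SetLogger swaps the package-level logger
	// consulted by LogModel, LogEnforce, LogPolicy, LogRole and LogError.
	l := &casbinlog.DefaultLogger{}
	l.EnableLog(true)
	casbinlog.SetLogger(l)
}
```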
+func (e *Enforcer) GetAllSubjects() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypesByName("p", constant.SubjectIndex) +} + +// GetAllNamedSubjects gets the list of subjects that show up in the current named policy. +func (e *Enforcer) GetAllNamedSubjects(ptype string) ([]string, error) { + fieldIndex, err := e.model.GetFieldIndex(ptype, constant.SubjectIndex) + if err != nil { + return nil, err + } + return e.model.GetValuesForFieldInPolicy("p", ptype, fieldIndex) +} + +// GetAllObjects gets the list of objects that show up in the current policy. +func (e *Enforcer) GetAllObjects() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypesByName("p", constant.ObjectIndex) +} + +// GetAllNamedObjects gets the list of objects that show up in the current named policy. +func (e *Enforcer) GetAllNamedObjects(ptype string) ([]string, error) { + fieldIndex, err := e.model.GetFieldIndex(ptype, constant.ObjectIndex) + if err != nil { + return nil, err + } + return e.model.GetValuesForFieldInPolicy("p", ptype, fieldIndex) +} + +// GetAllActions gets the list of actions that show up in the current policy. +func (e *Enforcer) GetAllActions() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypesByName("p", constant.ActionIndex) +} + +// GetAllNamedActions gets the list of actions that show up in the current named policy. +func (e *Enforcer) GetAllNamedActions(ptype string) ([]string, error) { + fieldIndex, err := e.model.GetFieldIndex(ptype, constant.ActionIndex) + if err != nil { + return nil, err + } + return e.model.GetValuesForFieldInPolicy("p", ptype, fieldIndex) +} + +// GetAllRoles gets the list of roles that show up in the current policy. +func (e *Enforcer) GetAllRoles() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypes("g", 1) +} + +// GetAllNamedRoles gets the list of roles that show up in the current named policy. +func (e *Enforcer) GetAllNamedRoles(ptype string) ([]string, error) { + return e.model.GetValuesForFieldInPolicy("g", ptype, 1) +} + +// GetPolicy gets all the authorization rules in the policy. +func (e *Enforcer) GetPolicy() ([][]string, error) { + return e.GetNamedPolicy("p") +} + +// GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. +func (e *Enforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.GetFilteredNamedPolicy("p", fieldIndex, fieldValues...) +} + +// GetNamedPolicy gets all the authorization rules in the named policy. +func (e *Enforcer) GetNamedPolicy(ptype string) ([][]string, error) { + return e.model.GetPolicy("p", ptype) +} + +// GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. +func (e *Enforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.model.GetFilteredPolicy("p", ptype, fieldIndex, fieldValues...) +} + +// GetGroupingPolicy gets all the role inheritance rules in the policy. +func (e *Enforcer) GetGroupingPolicy() ([][]string, error) { + return e.GetNamedGroupingPolicy("g") +} + +// GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *Enforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.GetFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) +} + +// GetNamedGroupingPolicy gets all the role inheritance rules in the policy. 
+func (e *Enforcer) GetNamedGroupingPolicy(ptype string) ([][]string, error) { + return e.model.GetPolicy("g", ptype) +} + +// GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *Enforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.model.GetFilteredPolicy("g", ptype, fieldIndex, fieldValues...) +} + +// GetFilteredNamedPolicyWithMatcher gets rules based on matcher from the policy. +func (e *Enforcer) GetFilteredNamedPolicyWithMatcher(ptype string, matcher string) ([][]string, error) { + var res [][]string + var err error + + functions := e.fm.GetFunctions() + if _, ok := e.model["g"]; ok { + for key, ast := range e.model["g"] { + // g must be a normal role definition (ast.RM != nil) + // or a conditional role definition (ast.CondRM != nil) + // ast.RM and ast.CondRM shouldn't be nil at the same time + if ast.RM != nil { + functions[key] = util.GenerateGFunction(ast.RM) + } + if ast.CondRM != nil { + functions[key] = util.GenerateConditionalGFunction(ast.CondRM) + } + } + } + + var expString string + if matcher == "" { + return res, fmt.Errorf("matcher is empty") + } else { + expString = util.RemoveComments(util.EscapeAssertion(matcher)) + } + + var expression *govaluate.EvaluableExpression + + expression, err = govaluate.NewEvaluableExpressionWithFunctions(expString, functions) + if err != nil { + return res, err + } + + pTokens := make(map[string]int, len(e.model["p"][ptype].Tokens)) + for i, token := range e.model["p"][ptype].Tokens { + pTokens[token] = i + } + + parameters := enforceParameters{ + pTokens: pTokens, + } + + if policyLen := len(e.model["p"][ptype].Policy); policyLen != 0 && strings.Contains(expString, ptype+"_") { + for _, pvals := range e.model["p"][ptype].Policy { + if len(e.model["p"][ptype].Tokens) != len(pvals) { + return res, fmt.Errorf( + "invalid policy size: expected %d, got %d, pvals: %v", + len(e.model["p"][ptype].Tokens), + len(pvals), + pvals) + } + + parameters.pVals = pvals + + result, err := expression.Eval(parameters) + + if err != nil { + return res, err + } + + switch result := result.(type) { + case bool: + if result { + res = append(res, pvals) + } + case float64: + if result != 0 { + res = append(res, pvals) + } + default: + return res, errors.New("matcher result should be bool, int or float") + } + } + } + return res, nil +} + +// HasPolicy determines whether an authorization rule exists. +func (e *Enforcer) HasPolicy(params ...interface{}) (bool, error) { + return e.HasNamedPolicy("p", params...) +} + +// HasNamedPolicy determines whether a named authorization rule exists. +func (e *Enforcer) HasNamedPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + return e.model.HasPolicy("p", ptype, strSlice) + } + + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.model.HasPolicy("p", ptype, policy) +} + +// AddPolicy adds an authorization rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *Enforcer) AddPolicy(params ...interface{}) (bool, error) { + return e.AddNamedPolicy("p", params...) +} + +// AddPolicies adds authorization rules to the current policy. 
+// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding rule by adding the new rule. +func (e *Enforcer) AddPolicies(rules [][]string) (bool, error) { + return e.AddNamedPolicies("p", rules) +} + +// AddPoliciesEx adds authorization rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddPoliciesEx(rules [][]string) (bool, error) { + return e.AddNamedPoliciesEx("p", rules) +} + +// AddNamedPolicy adds an authorization rule to the current named policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *Enforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + strSlice = append(make([]string, 0, len(strSlice)), strSlice...) + return e.addPolicy("p", ptype, strSlice) + } + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.addPolicy("p", ptype, policy) +} + +// AddNamedPolicies adds authorization rules to the current named policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding rule by adding the new rule. +func (e *Enforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("p", ptype, rules, false) +} + +// AddNamedPoliciesEx adds authorization rules to the current named policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("p", ptype, rules, true) +} + +// RemovePolicy removes an authorization rule from the current policy. +func (e *Enforcer) RemovePolicy(params ...interface{}) (bool, error) { + return e.RemoveNamedPolicy("p", params...) +} + +// UpdatePolicy updates an authorization rule from the current policy. +func (e *Enforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { + return e.UpdateNamedPolicy("p", oldPolicy, newPolicy) +} + +func (e *Enforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { + return e.updatePolicy("p", ptype, p1, p2) +} + +// UpdatePolicies updates authorization rules from the current policies. +func (e *Enforcer) UpdatePolicies(oldPolicies [][]string, newPolicies [][]string) (bool, error) { + return e.UpdateNamedPolicies("p", oldPolicies, newPolicies) +} + +func (e *Enforcer) UpdateNamedPolicies(ptype string, p1 [][]string, p2 [][]string) (bool, error) { + return e.updatePolicies("p", ptype, p1, p2) +} + +func (e *Enforcer) UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.UpdateFilteredNamedPolicies("p", newPolicies, fieldIndex, fieldValues...) +} + +func (e *Enforcer) UpdateFilteredNamedPolicies(ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.updateFilteredPolicies("p", ptype, newPolicies, fieldIndex, fieldValues...)
+} + +// RemovePolicies removes authorization rules from the current policy. +func (e *Enforcer) RemovePolicies(rules [][]string) (bool, error) { + return e.RemoveNamedPolicies("p", rules) +} + +// RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + return e.RemoveFilteredNamedPolicy("p", fieldIndex, fieldValues...) +} + +// RemoveNamedPolicy removes an authorization rule from the current named policy. +func (e *Enforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + return e.removePolicy("p", ptype, strSlice) + } + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.removePolicy("p", ptype, policy) +} + +// RemoveNamedPolicies removes authorization rules from the current named policy. +func (e *Enforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { + return e.removePolicies("p", ptype, rules) +} + +// RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.removeFilteredPolicy("p", ptype, fieldIndex, fieldValues) +} + +// HasGroupingPolicy determines whether a role inheritance rule exists. +func (e *Enforcer) HasGroupingPolicy(params ...interface{}) (bool, error) { + return e.HasNamedGroupingPolicy("g", params...) +} + +// HasNamedGroupingPolicy determines whether a named role inheritance rule exists. +func (e *Enforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + return e.model.HasPolicy("g", ptype, strSlice) + } + + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.model.HasPolicy("g", ptype, policy) +} + +// AddGroupingPolicy adds a role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *Enforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { + return e.AddNamedGroupingPolicy("g", params...) +} + +// AddGroupingPolicies adds role inheritance rules to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *Enforcer) AddGroupingPolicies(rules [][]string) (bool, error) { + return e.AddNamedGroupingPolicies("g", rules) +} + +// AddGroupingPoliciesEx adds role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddGroupingPoliciesEx(rules [][]string) (bool, error) { + return e.AddNamedGroupingPoliciesEx("g", rules) +} + +// AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. 
+func (e *Enforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + var ruleAdded bool + var err error + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + ruleAdded, err = e.addPolicy("g", ptype, strSlice) + } else { + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + ruleAdded, err = e.addPolicy("g", ptype, policy) + } + + return ruleAdded, err +} + +// AddNamedGroupingPolicies adds named role inheritance rules to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *Enforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("g", ptype, rules, false) +} + +// AddNamedGroupingPoliciesEx adds named role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("g", ptype, rules, true) +} + +// RemoveGroupingPolicy removes a role inheritance rule from the current policy. +func (e *Enforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { + return e.RemoveNamedGroupingPolicy("g", params...) +} + +// RemoveGroupingPolicies removes role inheritance rules from the current policy. +func (e *Enforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { + return e.RemoveNamedGroupingPolicies("g", rules) +} + +// RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + return e.RemoveFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) +} + +// RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. +func (e *Enforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + var ruleRemoved bool + var err error + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + ruleRemoved, err = e.removePolicy("g", ptype, strSlice) + } else { + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + ruleRemoved, err = e.removePolicy("g", ptype, policy) + } + + return ruleRemoved, err +} + +// RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. +func (e *Enforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + return e.removePolicies("g", ptype, rules) +} + +func (e *Enforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { + return e.UpdateNamedGroupingPolicy("g", oldRule, newRule) +} + +// UpdateGroupingPolicies updates authorization rules from the current policies. 
+func (e *Enforcer) UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) { + return e.UpdateNamedGroupingPolicies("g", oldRules, newRules) +} + +func (e *Enforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { + return e.updatePolicy("g", ptype, oldRule, newRule) +} + +func (e *Enforcer) UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + return e.updatePolicies("g", ptype, oldRules, newRules) +} + +// RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.removeFilteredPolicy("g", ptype, fieldIndex, fieldValues) +} + +// AddFunction adds a customized function. +func (e *Enforcer) AddFunction(name string, function govaluate.ExpressionFunction) { + e.fm.AddFunction(name, function) +} + +func (e *Enforcer) SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) { + return e.addPolicyWithoutNotify(sec, ptype, rule) +} + +func (e *Enforcer) SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) { + return e.addPoliciesWithoutNotify(sec, ptype, rules, false) +} + +func (e *Enforcer) SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) { + return e.addPoliciesWithoutNotify(sec, ptype, rules, true) +} + +func (e *Enforcer) SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) { + return e.removePolicyWithoutNotify(sec, ptype, rule) +} + +func (e *Enforcer) SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { + return e.removePoliciesWithoutNotify(sec, ptype, rules) +} + +func (e *Enforcer) SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.removeFilteredPolicyWithoutNotify(sec, ptype, fieldIndex, fieldValues) +} + +func (e *Enforcer) SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) { + return e.updatePolicyWithoutNotify(sec, ptype, oldRule, newRule) +} + +func (e *Enforcer) SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { + return e.updatePoliciesWithoutNotify(sec, ptype, oldRules, newRules) +} diff --git a/vendor/github.com/casbin/casbin/v2/model/assertion.go b/vendor/github.com/casbin/casbin/v2/model/assertion.go new file mode 100644 index 0000000000..7c5381d724 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/assertion.go @@ -0,0 +1,194 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "errors" + "strings" + + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/rbac" +) + +// Assertion represents an expression in a section of the model. +// For example: r = sub, obj, act. 
+type Assertion struct { + Key string + Value string + Tokens []string + ParamsTokens []string + Policy [][]string + PolicyMap map[string]int + RM rbac.RoleManager + CondRM rbac.ConditionalRoleManager + FieldIndexMap map[string]int + + logger log.Logger +} + +func (ast *Assertion) buildIncrementalRoleLinks(rm rbac.RoleManager, op PolicyOp, rules [][]string) error { + ast.RM = rm + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + + for _, rule := range rules { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + switch op { + case PolicyAdd: + err := rm.AddLink(rule[0], rule[1], rule[2:]...) + if err != nil { + return err + } + case PolicyRemove: + err := rm.DeleteLink(rule[0], rule[1], rule[2:]...) + if err != nil { + return err + } + } + } + return nil +} + +func (ast *Assertion) buildRoleLinks(rm rbac.RoleManager) error { + ast.RM = rm + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + for _, rule := range ast.Policy { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + err := ast.RM.AddLink(rule[0], rule[1], rule[2:]...) + if err != nil { + return err + } + } + + return nil +} + +func (ast *Assertion) buildIncrementalConditionalRoleLinks(condRM rbac.ConditionalRoleManager, op PolicyOp, rules [][]string) error { + ast.CondRM = condRM + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + + for _, rule := range rules { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + + var err error + domainRule := rule[2:len(ast.Tokens)] + + switch op { + case PolicyAdd: + err = ast.addConditionalRoleLink(rule, domainRule) + case PolicyRemove: + err = ast.CondRM.DeleteLink(rule[0], rule[1], rule[2:]...) + } + if err != nil { + return err + } + } + + return nil +} + +func (ast *Assertion) buildConditionalRoleLinks(condRM rbac.ConditionalRoleManager) error { + ast.CondRM = condRM + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + for _, rule := range ast.Policy { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + + domainRule := rule[2:len(ast.Tokens)] + + err := ast.addConditionalRoleLink(rule, domainRule) + if err != nil { + return err + } + } + + return nil +} + +// addConditionalRoleLink adds Link to rbac.ConditionalRoleManager and sets the parameters for LinkConditionFunc. +func (ast *Assertion) addConditionalRoleLink(rule []string, domainRule []string) error { + var err error + if len(domainRule) == 0 { + err = ast.CondRM.AddLink(rule[0], rule[1]) + if err == nil { + ast.CondRM.SetLinkConditionFuncParams(rule[0], rule[1], rule[len(ast.Tokens):]...) + } + } else { + domain := domainRule[0] + err = ast.CondRM.AddLink(rule[0], rule[1], domain) + if err == nil { + ast.CondRM.SetDomainLinkConditionFuncParams(rule[0], rule[1], domain, rule[len(ast.Tokens):]...) 
+ } + } + return err +} + +func (ast *Assertion) setLogger(logger log.Logger) { + ast.logger = logger +} + +func (ast *Assertion) copy() *Assertion { + tokens := append([]string(nil), ast.Tokens...) + policy := make([][]string, len(ast.Policy)) + + for i, p := range ast.Policy { + policy[i] = append(policy[i], p...) + } + policyMap := make(map[string]int) + for k, v := range ast.PolicyMap { + policyMap[k] = v + } + + newAst := &Assertion{ + Key: ast.Key, + Value: ast.Value, + PolicyMap: policyMap, + Tokens: tokens, + Policy: policy, + FieldIndexMap: ast.FieldIndexMap, + } + + return newAst +} diff --git a/vendor/github.com/casbin/casbin/v2/model/function.go b/vendor/github.com/casbin/casbin/v2/model/function.go new file mode 100644 index 0000000000..f1a8d00754 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/function.go @@ -0,0 +1,66 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sync" + + "github.com/casbin/casbin/v2/util" + "github.com/casbin/govaluate" +) + +// FunctionMap represents the collection of Function. +type FunctionMap struct { + fns *sync.Map +} + +// [string]govaluate.ExpressionFunction + +// AddFunction adds an expression function. +func (fm *FunctionMap) AddFunction(name string, function govaluate.ExpressionFunction) { + fm.fns.LoadOrStore(name, function) +} + +// LoadFunctionMap loads an initial function map. +func LoadFunctionMap() FunctionMap { + fm := &FunctionMap{} + fm.fns = &sync.Map{} + + fm.AddFunction("keyMatch", util.KeyMatchFunc) + fm.AddFunction("keyGet", util.KeyGetFunc) + fm.AddFunction("keyMatch2", util.KeyMatch2Func) + fm.AddFunction("keyGet2", util.KeyGet2Func) + fm.AddFunction("keyMatch3", util.KeyMatch3Func) + fm.AddFunction("keyGet3", util.KeyGet3Func) + fm.AddFunction("keyMatch4", util.KeyMatch4Func) + fm.AddFunction("keyMatch5", util.KeyMatch5Func) + fm.AddFunction("regexMatch", util.RegexMatchFunc) + fm.AddFunction("ipMatch", util.IPMatchFunc) + fm.AddFunction("globMatch", util.GlobMatchFunc) + + return *fm +} + +// GetFunctions return a map with all the functions. +func (fm *FunctionMap) GetFunctions() map[string]govaluate.ExpressionFunction { + ret := make(map[string]govaluate.ExpressionFunction) + + fm.fns.Range(func(k interface{}, v interface{}) bool { + ret[k.(string)] = v.(govaluate.ExpressionFunction) + return true + }) + + return ret +} diff --git a/vendor/github.com/casbin/casbin/v2/model/model.go b/vendor/github.com/casbin/casbin/v2/model/model.go new file mode 100644 index 0000000000..938072f452 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/model.go @@ -0,0 +1,434 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "container/list" + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/casbin/casbin/v2/config" + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/util" +) + +// Model represents the whole access control model. +type Model map[string]AssertionMap + +// AssertionMap is the collection of assertions, can be "r", "p", "g", "e", "m". +type AssertionMap map[string]*Assertion + +const defaultDomain string = "" +const defaultSeparator = "::" + +var sectionNameMap = map[string]string{ + "r": "request_definition", + "p": "policy_definition", + "g": "role_definition", + "e": "policy_effect", + "m": "matchers", +} + +// Minimal required sections for a model to be valid. +var requiredSections = []string{"r", "p", "e", "m"} + +func loadAssertion(model Model, cfg config.ConfigInterface, sec string, key string) bool { + value := cfg.String(sectionNameMap[sec] + "::" + key) + return model.AddDef(sec, key, value) +} + +var paramsRegex = regexp.MustCompile(`\((.*?)\)`) + +// getParamsToken Get ParamsToken from Assertion.Value. +func getParamsToken(value string) []string { + paramsString := paramsRegex.FindString(value) + if paramsString == "" { + return nil + } + paramsString = strings.TrimSuffix(strings.TrimPrefix(paramsString, "("), ")") + return strings.Split(paramsString, ",") +} + +// AddDef adds an assertion to the model. +func (model Model) AddDef(sec string, key string, value string) bool { + if value == "" { + return false + } + + ast := Assertion{} + ast.Key = key + ast.Value = value + ast.PolicyMap = make(map[string]int) + ast.FieldIndexMap = make(map[string]int) + ast.setLogger(model.GetLogger()) + + if sec == "r" || sec == "p" { + ast.Tokens = strings.Split(ast.Value, ",") + for i := range ast.Tokens { + ast.Tokens[i] = key + "_" + strings.TrimSpace(ast.Tokens[i]) + } + } else if sec == "g" { + ast.ParamsTokens = getParamsToken(ast.Value) + ast.Tokens = strings.Split(ast.Value, ",") + ast.Tokens = ast.Tokens[:len(ast.Tokens)-len(ast.ParamsTokens)] + } else { + ast.Value = util.RemoveComments(util.EscapeAssertion(ast.Value)) + } + + if sec == "m" && strings.Contains(ast.Value, "in") { + ast.Value = strings.Replace(strings.Replace(ast.Value, "[", "(", -1), "]", ")", -1) + } + + _, ok := model[sec] + if !ok { + model[sec] = make(AssertionMap) + } + + model[sec][key] = &ast + return true +} + +func getKeySuffix(i int) string { + if i == 1 { + return "" + } + + return strconv.Itoa(i) +} + +func loadSection(model Model, cfg config.ConfigInterface, sec string) { + i := 1 + for { + if !loadAssertion(model, cfg, sec, sec+getKeySuffix(i)) { + break + } else { + i++ + } + } +} + +// SetLogger sets the model's logger. +func (model Model) SetLogger(logger log.Logger) { + for _, astMap := range model { + for _, ast := range astMap { + ast.logger = logger + } + } + model["logger"] = AssertionMap{"logger": &Assertion{logger: logger}} +} + +// GetLogger returns the model's logger. 
+func (model Model) GetLogger() log.Logger { + return model["logger"]["logger"].logger +} + +// NewModel creates an empty model. +func NewModel() Model { + m := make(Model) + m.SetLogger(&log.DefaultLogger{}) + + return m +} + +// NewModelFromFile creates a model from a .CONF file. +func NewModelFromFile(path string) (Model, error) { + m := NewModel() + + err := m.LoadModel(path) + if err != nil { + return nil, err + } + + return m, nil +} + +// NewModelFromString creates a model from a string which contains model text. +func NewModelFromString(text string) (Model, error) { + m := NewModel() + + err := m.LoadModelFromText(text) + if err != nil { + return nil, err + } + + return m, nil +} + +// LoadModel loads the model from a model CONF file. +func (model Model) LoadModel(path string) error { + cfg, err := config.NewConfig(path) + if err != nil { + return err + } + + return model.loadModelFromConfig(cfg) +} + +// LoadModelFromText loads the model from the text. +func (model Model) LoadModelFromText(text string) error { + cfg, err := config.NewConfigFromText(text) + if err != nil { + return err + } + + return model.loadModelFromConfig(cfg) +} + +func (model Model) loadModelFromConfig(cfg config.ConfigInterface) error { + for s := range sectionNameMap { + loadSection(model, cfg, s) + } + ms := make([]string, 0) + for _, rs := range requiredSections { + if !model.hasSection(rs) { + ms = append(ms, sectionNameMap[rs]) + } + } + if len(ms) > 0 { + return fmt.Errorf("missing required sections: %s", strings.Join(ms, ",")) + } + return nil +} + +func (model Model) hasSection(sec string) bool { + section := model[sec] + return section != nil +} + +func (model Model) GetAssertion(sec string, ptype string) (*Assertion, error) { + if model[sec] == nil { + return nil, fmt.Errorf("missing required section %s", sec) + } + if model[sec][ptype] == nil { + return nil, fmt.Errorf("missing required definition %s in section %s", ptype, sec) + } + return model[sec][ptype], nil +} +
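The loaders above refuse any model that lacks the request, policy, effect, or matcher sections. A short sketch, mine, of the failure and success paths; the expected error text follows loadModelFromConfig above, and ToText (defined a little further down) round-trips the model back to CONF text:

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin/v2/model"
)

func main() {
	// Only [request_definition] present: every other required section is
	// reported in a single error by loadModelFromConfig.
	_, err := model.NewModelFromString("[request_definition]\nr = sub, obj, act\n")
	fmt.Println(err) // roughly: missing required sections: policy_definition,policy_effect,matchers

	m, err := model.NewModelFromString(`
[request_definition]
r = sub, obj, act

[policy_definition]
p = sub, obj, act

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = r.sub == p.sub && r.obj == p.obj && r.act == p.act
`)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.ToText()) // prints the model back out in CONF form
}
```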
+// PrintModel prints the model to the log. +func (model Model) PrintModel() { + if !model.GetLogger().IsEnabled() { + return + } + + var modelInfo [][]string + for k, v := range model { + if k == "logger" { + continue + } + + for i, j := range v { + modelInfo = append(modelInfo, []string{k, i, j.Value}) + } + } + + model.GetLogger().LogModel(modelInfo) +} + +func (model Model) SortPoliciesBySubjectHierarchy() error { + if model["e"]["e"].Value != constant.SubjectPriorityEffect { + return nil + } + g, err := model.GetAssertion("g", "g") + if err != nil { + return err + } + subIndex := 0 + for ptype, assertion := range model["p"] { + domainIndex, err := model.GetFieldIndex(ptype, constant.DomainIndex) + if err != nil { + domainIndex = -1 + } + policies := assertion.Policy + subjectHierarchyMap, err := getSubjectHierarchyMap(g.Policy) + if err != nil { + return err + } + sort.SliceStable(policies, func(i, j int) bool { + domain1, domain2 := defaultDomain, defaultDomain + if domainIndex != -1 { + domain1 = policies[i][domainIndex] + domain2 = policies[j][domainIndex] + } + name1, name2 := getNameWithDomain(domain1, policies[i][subIndex]), getNameWithDomain(domain2, policies[j][subIndex]) + p1 := subjectHierarchyMap[name1] + p2 := subjectHierarchyMap[name2] + return p1 > p2 + }) + for i, policy := range assertion.Policy { + assertion.PolicyMap[strings.Join(policy, ",")] = i + } + } + return nil +} + +func getSubjectHierarchyMap(policies [][]string) (map[string]int, error) { + subjectHierarchyMap := make(map[string]int) + // Tree structure of roles + policyMap := make(map[string][]string) + for _, policy := range policies { + if len(policy) < 2 { + return nil, errors.New("policy g expects at least 2 parameters") + } + domain := defaultDomain + if len(policy) != 2 { + domain = policy[2] + } + child := getNameWithDomain(domain, policy[0]) + parent := getNameWithDomain(domain, policy[1]) + policyMap[parent] = append(policyMap[parent], child) + if _, ok := subjectHierarchyMap[child]; !ok { + subjectHierarchyMap[child] = 0 + } + if _, ok := subjectHierarchyMap[parent]; !ok { + subjectHierarchyMap[parent] = 0 + } + subjectHierarchyMap[child] = 1 + } + // Use a queue for level-order traversal + queue := list.New() + for k, v := range subjectHierarchyMap { + root := k + if v != 0 { + continue + } + lv := 0 + queue.PushBack(root) + for queue.Len() != 0 { + sz := queue.Len() + for i := 0; i < sz; i++ { + node := queue.Front() + queue.Remove(node) + nodeValue := node.Value.(string) + subjectHierarchyMap[nodeValue] = lv + if _, ok := policyMap[nodeValue]; ok { + for _, child := range policyMap[nodeValue] { + queue.PushBack(child) + } + } + } + lv++ + } + } + return subjectHierarchyMap, nil +} + +func getNameWithDomain(domain string, name string) string { + return domain + defaultSeparator + name +} + +func (model Model) SortPoliciesByPriority() error { + for ptype, assertion := range model["p"] { + priorityIndex, err := model.GetFieldIndex(ptype, constant.PriorityIndex) + if err != nil { + continue + } + policies := assertion.Policy + sort.SliceStable(policies, func(i, j int) bool { + p1, err := strconv.Atoi(policies[i][priorityIndex]) + if err != nil { + return true + } + p2, err := strconv.Atoi(policies[j][priorityIndex]) + if err != nil { + return true + } + return p1 < p2 + }) + for i, policy := range assertion.Policy { + assertion.PolicyMap[strings.Join(policy, ",")] = i + } + } + return nil +} + +func (model Model) ToText() string { + tokenPatterns := make(map[string]string) + + pPattern, rPattern := regexp.MustCompile("^p_"), regexp.MustCompile("^r_")
ptype := range []string{"r", "p"} { + for _, token := range model[ptype][ptype].Tokens { + tokenPatterns[token] = rPattern.ReplaceAllString(pPattern.ReplaceAllString(token, "p."), "r.") + } + } + if strings.Contains(model["e"]["e"].Value, "p_eft") { + tokenPatterns["p_eft"] = "p.eft" + } + s := strings.Builder{} + writeString := func(sec string) { + for ptype := range model[sec] { + value := model[sec][ptype].Value + for tokenPattern, newToken := range tokenPatterns { + value = strings.Replace(value, tokenPattern, newToken, -1) + } + s.WriteString(fmt.Sprintf("%s = %s\n", sec, value)) + } + } + s.WriteString("[request_definition]\n") + writeString("r") + s.WriteString("[policy_definition]\n") + writeString("p") + if _, ok := model["g"]; ok { + s.WriteString("[role_definition]\n") + for ptype := range model["g"] { + s.WriteString(fmt.Sprintf("%s = %s\n", ptype, model["g"][ptype].Value)) + } + } + s.WriteString("[policy_effect]\n") + writeString("e") + s.WriteString("[matchers]\n") + writeString("m") + return s.String() +} + +func (model Model) Copy() Model { + newModel := NewModel() + + for sec, m := range model { + newAstMap := make(AssertionMap) + for ptype, ast := range m { + newAstMap[ptype] = ast.copy() + } + newModel[sec] = newAstMap + } + + newModel.SetLogger(model.GetLogger()) + return newModel +} + +func (model Model) GetFieldIndex(ptype string, field string) (int, error) { + assertion := model["p"][ptype] + if index, ok := assertion.FieldIndexMap[field]; ok { + return index, nil + } + pattern := fmt.Sprintf("%s_"+field, ptype) + index := -1 + for i, token := range assertion.Tokens { + if token == pattern { + index = i + break + } + } + if index == -1 { + return index, fmt.Errorf(field + " index is not set, please use enforcer.SetFieldIndex() to set index") + } + assertion.FieldIndexMap[field] = index + return index, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/model/policy.go b/vendor/github.com/casbin/casbin/v2/model/policy.go new file mode 100644 index 0000000000..875da09016 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/policy.go @@ -0,0 +1,482 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" + "strings" + + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/rbac" + "github.com/casbin/casbin/v2/util" +) + +type ( + PolicyOp int +) + +const ( + PolicyAdd PolicyOp = iota + PolicyRemove +) + +const DefaultSep = "," + +// BuildIncrementalRoleLinks provides incremental build the role inheritance relations. +func (model Model) BuildIncrementalRoleLinks(rmMap map[string]rbac.RoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error { + if sec == "g" && rmMap[ptype] != nil { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return err + } + return model[sec][ptype].buildIncrementalRoleLinks(rmMap[ptype], op, rules) + } + return nil +} + +// BuildRoleLinks initializes the roles in RBAC. 
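Stepping back to GetFieldIndex above, a hedged fragment of how a named field resolves to a column (assume m is a Model whose definition is "p = sub, dom, obj, act"; constant is github.com/casbin/casbin/v2/constant):

domIdx, err := m.GetFieldIndex("p", constant.DomainIndex) // scans tokens for "p_dom"
if err != nil {
	// no p_dom token: callers either fall back (SortPoliciesBySubjectHierarchy
	// treats this as "no domain column") or set one via enforcer.SetFieldIndex.
}
fmt.Println(domIdx) // 1; the result is memoized in the assertion's FieldIndexMap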
+func (model Model) BuildRoleLinks(rmMap map[string]rbac.RoleManager) error { + model.PrintPolicy() + for ptype, ast := range model["g"] { + if rm := rmMap[ptype]; rm != nil { + err := ast.buildRoleLinks(rm) + if err != nil { + return err + } + } + } + + return nil +} + +// BuildIncrementalConditionalRoleLinks provides incremental build the role inheritance relations. +func (model Model) BuildIncrementalConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error { + if sec == "g" && condRmMap[ptype] != nil { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return err + } + return model[sec][ptype].buildIncrementalConditionalRoleLinks(condRmMap[ptype], op, rules) + } + return nil +} + +// BuildConditionalRoleLinks initializes the roles in RBAC. +func (model Model) BuildConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager) error { + model.PrintPolicy() + for ptype, ast := range model["g"] { + if condRm := condRmMap[ptype]; condRm != nil { + err := ast.buildConditionalRoleLinks(condRm) + if err != nil { + return err + } + } + } + + return nil +} + +// PrintPolicy prints the policy to log. +func (model Model) PrintPolicy() { + if !model.GetLogger().IsEnabled() { + return + } + + policy := make(map[string][][]string) + + for key, ast := range model["p"] { + value, found := policy[key] + if found { + value = append(value, ast.Policy...) + policy[key] = value + } else { + policy[key] = ast.Policy + } + } + + for key, ast := range model["g"] { + value, found := policy[key] + if found { + value = append(value, ast.Policy...) + policy[key] = value + } else { + policy[key] = ast.Policy + } + } + + model.GetLogger().LogPolicy(policy) +} + +// ClearPolicy clears all current policy. +func (model Model) ClearPolicy() { + for _, ast := range model["p"] { + ast.Policy = nil + ast.PolicyMap = map[string]int{} + } + + for _, ast := range model["g"] { + ast.Policy = nil + ast.PolicyMap = map[string]int{} + } +} + +// GetPolicy gets all rules in a policy. +func (model Model) GetPolicy(sec string, ptype string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + return model[sec][ptype].Policy, nil +} + +// GetFilteredPolicy gets rules based on field filters from a policy. +func (model Model) GetFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + res := [][]string{} + + for _, rule := range model[sec][ptype].Policy { + matched := true + for i, fieldValue := range fieldValues { + if fieldValue != "" && rule[fieldIndex+i] != fieldValue { + matched = false + break + } + } + + if matched { + res = append(res, rule) + } + } + + return res, nil +} + +// HasPolicyEx determines whether a model has the specified policy rule with error. 
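A hedged sketch of how these role-link builders are typically driven (the enforcer does this internally; assume m already holds a grouping rule such as "g, alice, admin", and the imports are github.com/casbin/casbin/v2/rbac and its default-role-manager subpackage, vendored later in this diff):

rm := defaultrolemanager.NewRoleManagerImpl(10)
if err := m.BuildRoleLinks(map[string]rbac.RoleManager{"g": rm}); err != nil {
	log.Fatal(err)
}
roles, _ := rm.GetRoles("alice") // [admin]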
+func (model Model) HasPolicyEx(sec string, ptype string, rule []string) (bool, error) { + assertion, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + switch sec { + case "p": + if len(rule) != len(assertion.Tokens) { + return false, fmt.Errorf( + "invalid policy rule size: expected %d, got %d, rule: %v", + len(model["p"][ptype].Tokens), + len(rule), + rule) + } + case "g": + if len(rule) < len(assertion.Tokens) { + return false, fmt.Errorf( + "invalid policy rule size: expected %d, got %d, rule: %v", + len(model["g"][ptype].Tokens), + len(rule), + rule) + } + } + return model.HasPolicy(sec, ptype, rule) +} + +// HasPolicy determines whether a model has the specified policy rule. +func (model Model) HasPolicy(sec string, ptype string, rule []string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + _, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] + return ok, nil +} + +// HasPolicies determines whether a model has any of the specified policies. If one is found we return true. +func (model Model) HasPolicies(sec string, ptype string, rules [][]string) (bool, error) { + for i := 0; i < len(rules); i++ { + ok, err := model.HasPolicy(sec, ptype, rules[i]) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + + return false, nil +} + +// AddPolicy adds a policy rule to the model. +func (model Model) AddPolicy(sec string, ptype string, rule []string) error { + assertion, err := model.GetAssertion(sec, ptype) + if err != nil { + return err + } + assertion.Policy = append(assertion.Policy, rule) + assertion.PolicyMap[strings.Join(rule, DefaultSep)] = len(model[sec][ptype].Policy) - 1 + + hasPriority := false + if _, ok := assertion.FieldIndexMap[constant.PriorityIndex]; ok { + hasPriority = true + } + if sec == "p" && hasPriority { + if idxInsert, err := strconv.Atoi(rule[assertion.FieldIndexMap[constant.PriorityIndex]]); err == nil { + i := len(assertion.Policy) - 1 + for ; i > 0; i-- { + idx, err := strconv.Atoi(assertion.Policy[i-1][assertion.FieldIndexMap[constant.PriorityIndex]]) + if err != nil || idx <= idxInsert { + break + } + assertion.Policy[i] = assertion.Policy[i-1] + assertion.PolicyMap[strings.Join(assertion.Policy[i-1], DefaultSep)]++ + } + assertion.Policy[i] = rule + assertion.PolicyMap[strings.Join(rule, DefaultSep)] = i + } + } + return nil +} + +// AddPolicies adds policy rules to the model. +func (model Model) AddPolicies(sec string, ptype string, rules [][]string) error { + _, err := model.AddPoliciesWithAffected(sec, ptype, rules) + return err +} + +// AddPoliciesWithAffected adds policy rules to the model, and returns affected rules. +func (model Model) AddPoliciesWithAffected(sec string, ptype string, rules [][]string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + var affected [][]string + for _, rule := range rules { + hashKey := strings.Join(rule, DefaultSep) + _, ok := model[sec][ptype].PolicyMap[hashKey] + if ok { + continue + } + affected = append(affected, rule) + err = model.AddPolicy(sec, ptype, rule) + if err != nil { + return affected, err + } + } + return affected, err +} + +// RemovePolicy removes a policy rule from the model. +// Deprecated: Using AddPoliciesWithAffected instead. 
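Since AddPolicy special-cases priority-ordered policies, a hedged sketch of that branch may help. It is illustrative only and assumes a definition "p = priority, sub, obj, act, eft" whose priority column has already been resolved into FieldIndexMap (for example by a prior SortPoliciesByPriority call):

_ = m.AddPolicy("p", "p", []string{"10", "alice", "data1", "read", "allow"})
_ = m.AddPolicy("p", "p", []string{"5", "bob", "data1", "read", "deny"})

rules, _ := m.GetPolicy("p", "p")
// [[5 bob data1 read deny] [10 alice data1 read allow]]:
// priority 5 was inserted in sorted position, not appended.
fmt.Println(rules)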
+func (model Model) RemovePolicy(sec string, ptype string, rule []string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + index, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] + if !ok { + return false, err + } + + model[sec][ptype].Policy = append(model[sec][ptype].Policy[:index], model[sec][ptype].Policy[index+1:]...) + delete(model[sec][ptype].PolicyMap, strings.Join(rule, DefaultSep)) + for i := index; i < len(model[sec][ptype].Policy); i++ { + model[sec][ptype].PolicyMap[strings.Join(model[sec][ptype].Policy[i], DefaultSep)] = i + } + + return true, err +} + +// UpdatePolicy updates a policy rule from the model. +func (model Model) UpdatePolicy(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + oldPolicy := strings.Join(oldRule, DefaultSep) + index, ok := model[sec][ptype].PolicyMap[oldPolicy] + if !ok { + return false, nil + } + + model[sec][ptype].Policy[index] = newRule + delete(model[sec][ptype].PolicyMap, oldPolicy) + model[sec][ptype].PolicyMap[strings.Join(newRule, DefaultSep)] = index + + return true, nil +} + +// UpdatePolicies updates a policy rule from the model. +func (model Model) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + rollbackFlag := false + // index -> []{oldIndex, newIndex} + modifiedRuleIndex := make(map[int][]int) + // rollback + defer func() { + if rollbackFlag { + for index, oldNewIndex := range modifiedRuleIndex { + model[sec][ptype].Policy[index] = oldRules[oldNewIndex[0]] + oldPolicy := strings.Join(oldRules[oldNewIndex[0]], DefaultSep) + newPolicy := strings.Join(newRules[oldNewIndex[1]], DefaultSep) + delete(model[sec][ptype].PolicyMap, newPolicy) + model[sec][ptype].PolicyMap[oldPolicy] = index + } + } + }() + + newIndex := 0 + for oldIndex, oldRule := range oldRules { + oldPolicy := strings.Join(oldRule, DefaultSep) + index, ok := model[sec][ptype].PolicyMap[oldPolicy] + if !ok { + rollbackFlag = true + return false, nil + } + + model[sec][ptype].Policy[index] = newRules[newIndex] + delete(model[sec][ptype].PolicyMap, oldPolicy) + model[sec][ptype].PolicyMap[strings.Join(newRules[newIndex], DefaultSep)] = index + modifiedRuleIndex[index] = []int{oldIndex, newIndex} + newIndex++ + } + + return true, nil +} + +// RemovePolicies removes policy rules from the model. +func (model Model) RemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { + affected, err := model.RemovePoliciesWithAffected(sec, ptype, rules) + return len(affected) != 0, err +} + +// RemovePoliciesWithAffected removes policy rules from the model, and returns affected rules. +func (model Model) RemovePoliciesWithAffected(sec string, ptype string, rules [][]string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + var affected [][]string + for _, rule := range rules { + index, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] + if !ok { + continue + } + + affected = append(affected, rule) + model[sec][ptype].Policy = append(model[sec][ptype].Policy[:index], model[sec][ptype].Policy[index+1:]...) 
+ delete(model[sec][ptype].PolicyMap, strings.Join(rule, DefaultSep)) + for i := index; i < len(model[sec][ptype].Policy); i++ { + model[sec][ptype].PolicyMap[strings.Join(model[sec][ptype].Policy[i], DefaultSep)] = i + } + } + return affected, nil +} + +// RemoveFilteredPolicy removes policy rules based on field filters from the model. +func (model Model) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, [][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, nil, err + } + var tmp [][]string + var effects [][]string + res := false + model[sec][ptype].PolicyMap = map[string]int{} + + for _, rule := range model[sec][ptype].Policy { + matched := true + for i, fieldValue := range fieldValues { + if fieldValue != "" && rule[fieldIndex+i] != fieldValue { + matched = false + break + } + } + + if matched { + effects = append(effects, rule) + } else { + tmp = append(tmp, rule) + model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] = len(tmp) - 1 + } + } + + if len(tmp) != len(model[sec][ptype].Policy) { + model[sec][ptype].Policy = tmp + res = true + } + + return res, effects, nil +} + +// GetValuesForFieldInPolicy gets all values for a field for all rules in a policy, duplicated values are removed. +func (model Model) GetValuesForFieldInPolicy(sec string, ptype string, fieldIndex int) ([]string, error) { + values := []string{} + + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + + for _, rule := range model[sec][ptype].Policy { + values = append(values, rule[fieldIndex]) + } + + util.ArrayRemoveDuplicates(&values) + + return values, nil +} + +// GetValuesForFieldInPolicyAllTypes gets all values for a field for all rules in a policy of all ptypes, duplicated values are removed. +func (model Model) GetValuesForFieldInPolicyAllTypes(sec string, fieldIndex int) ([]string, error) { + values := []string{} + + for ptype := range model[sec] { + v, err := model.GetValuesForFieldInPolicy(sec, ptype, fieldIndex) + if err != nil { + return nil, err + } + values = append(values, v...) + } + + util.ArrayRemoveDuplicates(&values) + + return values, nil +} + +// GetValuesForFieldInPolicyAllTypesByName gets all values for a field for all rules in a policy of all ptypes, duplicated values are removed. +func (model Model) GetValuesForFieldInPolicyAllTypesByName(sec string, field string) ([]string, error) { + values := []string{} + + for ptype := range model[sec] { + // GetFieldIndex will return (-1, err) if field is not found, ignore it + index, err := model.GetFieldIndex(ptype, field) + if err != nil { + continue + } + v, err := model.GetValuesForFieldInPolicy(sec, ptype, index) + if err != nil { + return nil, err + } + values = append(values, v...) + } + + util.ArrayRemoveDuplicates(&values) + + return values, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter.go b/vendor/github.com/casbin/casbin/v2/persist/adapter.go new file mode 100644 index 0000000000..0525657a10 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter.go @@ -0,0 +1,74 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "encoding/csv" + "strings" + + "github.com/casbin/casbin/v2/model" +) + +// LoadPolicyLine loads a text line as a policy rule to model. +func LoadPolicyLine(line string, m model.Model) error { + if line == "" || strings.HasPrefix(line, "#") { + return nil + } + + r := csv.NewReader(strings.NewReader(line)) + r.Comma = ',' + r.Comment = '#' + r.TrimLeadingSpace = true + + tokens, err := r.Read() + if err != nil { + return err + } + + return LoadPolicyArray(tokens, m) +} + +// LoadPolicyArray loads a policy rule to model. +func LoadPolicyArray(rule []string, m model.Model) error { + key := rule[0] + sec := key[:1] + ok, err := m.HasPolicyEx(sec, key, rule[1:]) + if err != nil { + return err + } + if ok { + return nil // skip duplicated policy + } + m.AddPolicy(sec, key, rule[1:]) + return nil +} + +// Adapter is the interface for Casbin adapters. +type Adapter interface { + // LoadPolicy loads all policy rules from the storage. + LoadPolicy(model model.Model) error + // SavePolicy saves all policy rules to the storage. + SavePolicy(model model.Model) error + + // AddPolicy adds a policy rule to the storage. + // This is part of the Auto-Save feature. + AddPolicy(sec string, ptype string, rule []string) error + // RemovePolicy removes a policy rule from the storage. + // This is part of the Auto-Save feature. + RemovePolicy(sec string, ptype string, rule []string) error + // RemoveFilteredPolicy removes policy rules that match the filter from the storage. + // This is part of the Auto-Save feature. + RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go new file mode 100644 index 0000000000..bda78a7e21 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go @@ -0,0 +1,39 @@ +// Copyright 2023 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "context" + + "github.com/casbin/casbin/v2/model" +) + +// ContextAdapter provides a context-aware interface for Casbin adapters. +type ContextAdapter interface { + // LoadPolicyCtx loads all policy rules from the storage with context. + LoadPolicyCtx(ctx context.Context, model model.Model) error + // SavePolicyCtx saves all policy rules to the storage with context. + SavePolicyCtx(ctx context.Context, model model.Model) error + + // AddPolicyCtx adds a policy rule to the storage with context. 
+ // This is part of the Auto-Save feature. + AddPolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error + // RemovePolicyCtx removes a policy rule from the storage with context. + // This is part of the Auto-Save feature. + RemovePolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error + // RemoveFilteredPolicyCtx removes policy rules that match the filter from the storage with context. + // This is part of the Auto-Save feature. + RemoveFilteredPolicyCtx(ctx context.Context, sec string, ptype string, fieldIndex int, fieldValues ...string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go new file mode 100644 index 0000000000..82c9a0e7cd --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go @@ -0,0 +1,29 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "github.com/casbin/casbin/v2/model" +) + +// FilteredAdapter is the interface for Casbin adapters supporting filtered policies. +type FilteredAdapter interface { + Adapter + + // LoadFilteredPolicy loads only policy rules that match the filter. + LoadFilteredPolicy(model model.Model, filter interface{}) error + // IsFiltered returns true if the loaded policy has been filtered. + IsFiltered() bool +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go new file mode 100644 index 0000000000..7893ce1bd3 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go @@ -0,0 +1,31 @@ +// Copyright 2024 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "context" + + "github.com/casbin/casbin/v2/model" +) + +// ContextFilteredAdapter is the context-aware interface for Casbin adapters supporting filtered policies. +type ContextFilteredAdapter interface { + ContextAdapter + + // LoadFilteredPolicyCtx loads only policy rules that match the filter. + LoadFilteredPolicyCtx(ctx context.Context, model model.Model, filter interface{}) error + // IsFilteredCtx returns true if the loaded policy has been filtered. 
+ IsFilteredCtx(ctx context.Context) bool +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go new file mode 100644 index 0000000000..56ec415fe0 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go @@ -0,0 +1,26 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// BatchAdapter is the interface for Casbin adapters with multiple add and remove policy functions. +type BatchAdapter interface { + Adapter + // AddPolicies adds policy rules to the storage. + // This is part of the Auto-Save feature. + AddPolicies(sec string, ptype string, rules [][]string) error + // RemovePolicies removes policy rules from the storage. + // This is part of the Auto-Save feature. + RemovePolicies(sec string, ptype string, rules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go new file mode 100644 index 0000000000..741c184d6d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go @@ -0,0 +1,29 @@ +// Copyright 2024 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import "context" + +// ContextBatchAdapter is the context-aware interface for Casbin adapters with multiple add and remove policy functions. +type ContextBatchAdapter interface { + ContextAdapter + + // AddPoliciesCtx adds policy rules to the storage. + // This is part of the Auto-Save feature. + AddPoliciesCtx(ctx context.Context, sec string, ptype string, rules [][]string) error + // RemovePoliciesCtx removes policy rules from the storage. + // This is part of the Auto-Save feature. + RemovePoliciesCtx(ctx context.Context, sec string, ptype string, rules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go b/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go new file mode 100644 index 0000000000..08447b83c2 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go @@ -0,0 +1,39 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
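To make the adapter contracts above concrete, a hedged sketch of the CSV plumbing they all share: storage adapters feed each persisted rule through persist.LoadPolicyLine (persist/adapter.go earlier in this diff), which skips comments, blanks, and duplicates (assume m is a loaded model.Model):

_ = persist.LoadPolicyLine("p, alice, data1, read", m) // added via model.AddPolicy
_ = persist.LoadPolicyLine("p, alice, data1, read", m) // duplicate, silently skipped
_ = persist.LoadPolicyLine("# comments and empty lines are ignored", m)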
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cache
+
+import "errors"
+
+var ErrNoSuchKey = errors.New("there's no such key existing in cache")
+
+type Cache interface {
+	// Set puts key and value into cache.
+	// The first extra parameter should be a time.Duration denoting the
+	// expected survival time (the implementations that follow cast it as such).
+	// If the survival time is 0 or less, the key never expires.
+	Set(key string, value bool, extra ...interface{}) error
+
+	// Get returns the result for key.
+	// If there's no such key existing in cache,
+	// ErrNoSuchKey will be returned.
+	Get(key string) (bool, error)
+
+	// Delete will remove the specific key in cache.
+	// If there's no such key existing in cache,
+	// ErrNoSuchKey will be returned.
+	Delete(key string) error
+
+	// Clear deletes all the items stored in cache.
+	Clear() error
+}
diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go b/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go
new file mode 100644
index 0000000000..816e12dcc5
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go
@@ -0,0 +1,86 @@
+// Copyright 2021 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
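A hedged sketch of the Cache contract in use, relying on the DefaultCache implementation that follows; the key format is up to the caller (a cached enforcer would typically join the request values):

c, _ := cache.NewDefaultCache()
_ = c.Set("alice,data1,read", true, time.Minute) // cache an allow decision for one minute
allowed, err := c.Get("alice,data1,read")
if errors.Is(err, cache.ErrNoSuchKey) {
	// never cached, or the TTL elapsed and the entry was evicted on read
}
fmt.Println(allowed)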
+ +package cache + +import ( + "sync" + "time" +) + +type SyncCache struct { + cache DefaultCache + sync.RWMutex +} + +func (c *SyncCache) Set(key string, value bool, extra ...interface{}) error { + ttl := time.Duration(-1) + if len(extra) > 0 { + ttl = extra[0].(time.Duration) + } + c.Lock() + defer c.Unlock() + c.cache[key] = cacheItem{ + value: value, + expiresAt: time.Now().Add(ttl), + ttl: ttl, + } + return nil +} + +func (c *SyncCache) Get(key string) (bool, error) { + c.RLock() + res, ok := c.cache[key] + c.RUnlock() + if !ok { + return false, ErrNoSuchKey + } else { + if res.ttl > 0 && time.Now().After(res.expiresAt) { + c.Lock() + defer c.Unlock() + delete(c.cache, key) + return false, ErrNoSuchKey + } + return res.value, nil + } +} + +func (c *SyncCache) Delete(key string) error { + c.RLock() + _, ok := c.cache[key] + c.RUnlock() + if !ok { + return ErrNoSuchKey + } else { + c.Lock() + defer c.Unlock() + delete(c.cache, key) + return nil + } +} + +func (c *SyncCache) Clear() error { + c.Lock() + c.cache = make(DefaultCache) + c.Unlock() + return nil +} + +func NewSyncCache() (Cache, error) { + cache := SyncCache{ + make(DefaultCache), + sync.RWMutex{}, + } + return &cache, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go b/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go new file mode 100644 index 0000000000..9108e7d64d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go @@ -0,0 +1,69 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import "time" + +type cacheItem struct { + value bool + expiresAt time.Time + ttl time.Duration +} + +type DefaultCache map[string]cacheItem + +func (c *DefaultCache) Set(key string, value bool, extra ...interface{}) error { + ttl := time.Duration(-1) + if len(extra) > 0 { + ttl = extra[0].(time.Duration) + } + (*c)[key] = cacheItem{ + value: value, + expiresAt: time.Now().Add(ttl), + ttl: ttl, + } + return nil +} + +func (c *DefaultCache) Get(key string) (bool, error) { + if res, ok := (*c)[key]; !ok { + return false, ErrNoSuchKey + } else { + if res.ttl > 0 && time.Now().After(res.expiresAt) { + delete(*c, key) + return false, ErrNoSuchKey + } + return res.value, nil + } +} + +func (c *DefaultCache) Delete(key string) error { + if _, ok := (*c)[key]; !ok { + return ErrNoSuchKey + } else { + delete(*c, key) + return nil + } +} + +func (c *DefaultCache) Clear() error { + *c = make(DefaultCache) + return nil +} + +func NewDefaultCache() (Cache, error) { + cache := make(DefaultCache) + return &cache, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go b/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go new file mode 100644 index 0000000000..ceaed83851 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go @@ -0,0 +1,33 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// Dispatcher is the interface for Casbin dispatcher. +type Dispatcher interface { + // AddPolicies adds policies rule to all instance. + AddPolicies(sec string, ptype string, rules [][]string) error + // RemovePolicies removes policies rule from all instance. + RemovePolicies(sec string, ptype string, rules [][]string) error + // RemoveFilteredPolicy removes policy rules that match the filter from all instance. + RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error + // ClearPolicy clears all current policy in all instances + ClearPolicy() error + // UpdatePolicy updates policy rule from all instance. + UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error + // UpdatePolicies updates some policy rules from all instance + UpdatePolicies(sec string, ptype string, oldrules, newRules [][]string) error + // UpdateFilteredPolicies deletes old rules and adds new rules. + UpdateFilteredPolicies(sec string, ptype string, oldRules [][]string, newRules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go new file mode 100644 index 0000000000..c68f0eaa41 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go @@ -0,0 +1,149 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileadapter + +import ( + "bufio" + "bytes" + "errors" + "os" + "strings" + + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + "github.com/casbin/casbin/v2/util" +) + +// Adapter is the file adapter for Casbin. +// It can load policy from file or save policy to file. +type Adapter struct { + filePath string +} + +func (a *Adapter) UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error { + return errors.New("not implemented") +} + +func (a *Adapter) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error { + return errors.New("not implemented") +} + +func (a *Adapter) UpdateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { + return nil, errors.New("not implemented") +} + +// NewAdapter is the constructor for Adapter. 
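A hedged usage sketch for the file adapter (the path is hypothetical). Note that its mutating methods (the Update* ones above, AddPolicy/RemovePolicy below) deliberately return "not implemented": this adapter only supports whole-file load/save, so Auto-Save effectively persists nothing for it:

a := fileadapter.NewAdapter("policy.csv")
_ = a.LoadPolicy(m) // streams each line through persist.LoadPolicyLine
_ = a.SavePolicy(m) // rewrites the file from model["p"] and model["g"]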
+func NewAdapter(filePath string) *Adapter { + return &Adapter{filePath: filePath} +} + +// LoadPolicy loads all policy rules from the storage. +func (a *Adapter) LoadPolicy(model model.Model) error { + if a.filePath == "" { + return errors.New("invalid file path, file path cannot be empty") + } + + return a.loadPolicyFile(model, persist.LoadPolicyLine) +} + +// SavePolicy saves all policy rules to the storage. +func (a *Adapter) SavePolicy(model model.Model) error { + if a.filePath == "" { + return errors.New("invalid file path, file path cannot be empty") + } + + var tmp bytes.Buffer + + for ptype, ast := range model["p"] { + for _, rule := range ast.Policy { + tmp.WriteString(ptype + ", ") + tmp.WriteString(util.ArrayToString(rule)) + tmp.WriteString("\n") + } + } + + for ptype, ast := range model["g"] { + for _, rule := range ast.Policy { + tmp.WriteString(ptype + ", ") + tmp.WriteString(util.ArrayToString(rule)) + tmp.WriteString("\n") + } + } + + return a.savePolicyFile(strings.TrimRight(tmp.String(), "\n")) +} + +func (a *Adapter) loadPolicyFile(model model.Model, handler func(string, model.Model) error) error { + f, err := os.Open(a.filePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + err = handler(line, model) + if err != nil { + return err + } + } + return scanner.Err() +} + +func (a *Adapter) savePolicyFile(text string) error { + f, err := os.Create(a.filePath) + if err != nil { + return err + } + w := bufio.NewWriter(f) + + _, err = w.WriteString(text) + if err != nil { + return err + } + + err = w.Flush() + if err != nil { + return err + } + + return f.Close() +} + +// AddPolicy adds a policy rule to the storage. +func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error { + return errors.New("not implemented") +} + +// AddPolicies adds policy rules to the storage. +func (a *Adapter) AddPolicies(sec string, ptype string, rules [][]string) error { + return errors.New("not implemented") +} + +// RemovePolicy removes a policy rule from the storage. +func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error { + return errors.New("not implemented") +} + +// RemovePolicies removes policy rules from the storage. +func (a *Adapter) RemovePolicies(sec string, ptype string, rules [][]string) error { + return errors.New("not implemented") +} + +// RemoveFilteredPolicy removes policy rules that match the filter from the storage. +func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { + return errors.New("not implemented") +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go new file mode 100644 index 0000000000..1a074c9a9e --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go @@ -0,0 +1,156 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fileadapter + +import ( + "bufio" + "errors" + "os" + "strings" + + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +// FilteredAdapter is the filtered file adapter for Casbin. It can load policy +// from file or save policy to file and supports loading of filtered policies. +type FilteredAdapter struct { + *Adapter + filtered bool +} + +// Filter defines the filtering rules for a FilteredAdapter's policy. Empty values +// are ignored, but all others must match the filter. +type Filter struct { + P []string + G []string + G1 []string + G2 []string + G3 []string + G4 []string + G5 []string +} + +// NewFilteredAdapter is the constructor for FilteredAdapter. +func NewFilteredAdapter(filePath string) *FilteredAdapter { + a := FilteredAdapter{} + a.filtered = true + a.Adapter = NewAdapter(filePath) + return &a +} + +// LoadPolicy loads all policy rules from the storage. +func (a *FilteredAdapter) LoadPolicy(model model.Model) error { + a.filtered = false + return a.Adapter.LoadPolicy(model) +} + +// LoadFilteredPolicy loads only policy rules that match the filter. +func (a *FilteredAdapter) LoadFilteredPolicy(model model.Model, filter interface{}) error { + if filter == nil { + return a.LoadPolicy(model) + } + if a.filePath == "" { + return errors.New("invalid file path, file path cannot be empty") + } + + filterValue, ok := filter.(*Filter) + if !ok { + return errors.New("invalid filter type") + } + err := a.loadFilteredPolicyFile(model, filterValue, persist.LoadPolicyLine) + if err == nil { + a.filtered = true + } + return err +} + +func (a *FilteredAdapter) loadFilteredPolicyFile(model model.Model, filter *Filter, handler func(string, model.Model) error) error { + f, err := os.Open(a.filePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + if filterLine(line, filter) { + continue + } + + err = handler(line, model) + if err != nil { + return err + } + } + return scanner.Err() +} + +// IsFiltered returns true if the loaded policy has been filtered. +func (a *FilteredAdapter) IsFiltered() bool { + return a.filtered +} + +// SavePolicy saves all policy rules to the storage. 
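A hedged sketch of filtered loading; Filter fields are positional (matched against a rule's fields after the ptype) and empty strings match anything:

fa := fileadapter.NewFilteredAdapter("policy.csv") // hypothetical path
err := fa.LoadFilteredPolicy(m, &fileadapter.Filter{
	P: []string{"alice"}, // keep only p rules whose first field is alice
})
if err == nil && fa.IsFiltered() {
	// SavePolicy is now refused, so a partial policy can't clobber the file
}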
+func (a *FilteredAdapter) SavePolicy(model model.Model) error {
+	if a.filtered {
+		return errors.New("cannot save a filtered policy")
+	}
+	return a.Adapter.SavePolicy(model)
+}
+
+func filterLine(line string, filter *Filter) bool {
+	if filter == nil {
+		return false
+	}
+	p := strings.Split(line, ",")
+	if len(p) == 0 {
+		return true
+	}
+	var filterSlice []string
+	switch strings.TrimSpace(p[0]) {
+	case "p":
+		filterSlice = filter.P
+	case "g":
+		filterSlice = filter.G
+	case "g1":
+		filterSlice = filter.G1
+	case "g2":
+		filterSlice = filter.G2
+	case "g3":
+		filterSlice = filter.G3
+	case "g4":
+		filterSlice = filter.G4
+	case "g5":
+		filterSlice = filter.G5
+	}
+	return filterWords(p, filterSlice)
+}
+
+func filterWords(line []string, filter []string) bool {
+	if len(line) < len(filter)+1 {
+		return true
+	}
+	var skipLine bool
+	for i, v := range filter {
+		if len(v) > 0 && strings.TrimSpace(v) != strings.TrimSpace(line[i+1]) {
+			skipLine = true
+			break
+		}
+	}
+	return skipLine
+}
diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go
new file mode 100644
index 0000000000..fcc5f82185
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go
@@ -0,0 +1,122 @@
+// Copyright 2017 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileadapter
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/casbin/casbin/v2/model"
+	"github.com/casbin/casbin/v2/persist"
+)
+
+// AdapterMock is a mock file adapter for Casbin, intended for testing.
+// It can load policy from file or save policy to file.
+type AdapterMock struct {
+	filePath   string
+	errorValue string
+}
+
+// NewAdapterMock is the constructor for AdapterMock.
+func NewAdapterMock(filePath string) *AdapterMock {
+	a := AdapterMock{}
+	a.filePath = filePath
+	return &a
+}
+
+// LoadPolicy loads all policy rules from the storage.
+func (a *AdapterMock) LoadPolicy(model model.Model) error {
+	err := a.loadPolicyFile(model, persist.LoadPolicyLine)
+	return err
+}
+
+// SavePolicy saves all policy rules to the storage.
+func (a *AdapterMock) SavePolicy(model model.Model) error {
+	return nil
+}
+
+func (a *AdapterMock) loadPolicyFile(model model.Model, handler func(string, model.Model) error) error {
+	f, err := os.Open(a.filePath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	buf := bufio.NewReader(f)
+	for {
+		line, err := buf.ReadString('\n')
+		line = strings.TrimSpace(line)
+		if err2 := handler(line, model); err2 != nil {
+			return err2
+		}
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+	}
+}
+
+// SetMockErr sets the error string to be returned by the mock during testing.
+func (a *AdapterMock) SetMockErr(errorToSet string) {
+	a.errorValue = errorToSet
+}
+
+// GetMockErr returns a mock error or nil.
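A hedged sketch of the mock's intended use in tests: once armed via SetMockErr, every mutating call fails with that error:

ma := fileadapter.NewAdapterMock("policy.csv") // hypothetical path
ma.SetMockErr("storage offline")
err := ma.AddPolicy("p", "p", []string{"alice", "data1", "read"})
fmt.Println(err) // storage offline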
+func (a *AdapterMock) GetMockErr() error {
+	var returnError error
+	if a.errorValue != "" {
+		returnError = errors.New(a.errorValue)
+	}
+	return returnError
+}
+
+// AddPolicy adds a policy rule to the storage.
+func (a *AdapterMock) AddPolicy(sec string, ptype string, rule []string) error {
+	return a.GetMockErr()
+}
+
+// AddPolicies adds policy rules to the storage.
+func (a *AdapterMock) AddPolicies(sec string, ptype string, rules [][]string) error {
+	return a.GetMockErr()
+}
+
+// RemovePolicy removes a policy rule from the storage.
+func (a *AdapterMock) RemovePolicy(sec string, ptype string, rule []string) error {
+	return a.GetMockErr()
+}
+
+// RemovePolicies removes policy rules from the storage.
+func (a *AdapterMock) RemovePolicies(sec string, ptype string, rules [][]string) error {
+	return a.GetMockErr()
+}
+
+// UpdatePolicy updates a policy rule in the storage.
+func (a *AdapterMock) UpdatePolicy(sec string, ptype string, oldRule, newPolicy []string) error {
+	return a.GetMockErr()
+}
+
+func (a *AdapterMock) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error {
+	return a.GetMockErr()
+}
+
+// RemoveFilteredPolicy removes policy rules that match the filter from the storage.
+func (a *AdapterMock) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error {
+	return a.GetMockErr()
+}
diff --git a/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go b/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go
new file mode 100644
index 0000000000..fe9204afd4
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package persist

+// UpdatableAdapter is the interface for Casbin adapters with an update-policy function.
+type UpdatableAdapter interface {
+	Adapter
+	// UpdatePolicy updates a policy rule in storage.
+	// This is part of the Auto-Save feature.
+	UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error
+	// UpdatePolicies updates some policy rules in storage, like db, redis.
+	UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error
+	// UpdateFilteredPolicies deletes old rules and adds new rules.
+	UpdateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error)
+}
diff --git a/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go
new file mode 100644
index 0000000000..55b8ba9df6
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import "context" + +// ContextUpdatableAdapter is the context-aware interface for Casbin adapters with add update policy function. +type ContextUpdatableAdapter interface { + ContextAdapter + + // UpdatePolicyCtx updates a policy rule from storage. + // This is part of the Auto-Save feature. + UpdatePolicyCtx(ctx context.Context, sec string, ptype string, oldRule, newRule []string) error + // UpdatePoliciesCtx updates some policy rules to storage, like db, redis. + UpdatePoliciesCtx(ctx context.Context, sec string, ptype string, oldRules, newRules [][]string) error + // UpdateFilteredPoliciesCtx deletes old rules and adds new rules. + UpdateFilteredPoliciesCtx(ctx context.Context, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher.go b/vendor/github.com/casbin/casbin/v2/persist/watcher.go new file mode 100644 index 0000000000..0d843606b4 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/watcher.go @@ -0,0 +1,29 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// Watcher is the interface for Casbin watchers. +type Watcher interface { + // SetUpdateCallback sets the callback function that the watcher will call + // when the policy in DB has been changed by other instances. + // A classic callback is Enforcer.LoadPolicy(). + SetUpdateCallback(func(string)) error + // Update calls the update callback of other instances to synchronize their policy. + // It is usually called after changing the policy in DB, like Enforcer.SavePolicy(), + // Enforcer.AddPolicy(), Enforcer.RemovePolicy(), etc. + Update() error + // Close stops and releases the watcher, the callback function will not be called any more. + Close() +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go b/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go new file mode 100644 index 0000000000..1c6f4299cd --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go @@ -0,0 +1,40 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
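A hedged consumer-side sketch of the Watcher contract just defined; w stands in for any concrete watcher implementation (Redis, etcd, and similar adapters exist as separate modules) and e for an enforcer:

_ = w.SetUpdateCallback(func(msg string) {
	_ = e.LoadPolicy() // another instance changed the policy: reload ours
})
_ = w.Update() // after a local write, nudge the other instances to re-sync
defer w.Close()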
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package persist
+
+import "github.com/casbin/casbin/v2/model"
+
+// WatcherEx is a strengthened Casbin watcher interface.
+type WatcherEx interface {
+	Watcher
+	// UpdateForAddPolicy calls the update callback of other instances to synchronize their policy.
+	// It is called after Enforcer.AddPolicy()
+	UpdateForAddPolicy(sec, ptype string, params ...string) error
+	// UpdateForRemovePolicy calls the update callback of other instances to synchronize their policy.
+	// It is called after Enforcer.RemovePolicy()
+	UpdateForRemovePolicy(sec, ptype string, params ...string) error
+	// UpdateForRemoveFilteredPolicy calls the update callback of other instances to synchronize their policy.
+	// It is called after Enforcer.RemoveFilteredNamedGroupingPolicy()
+	UpdateForRemoveFilteredPolicy(sec, ptype string, fieldIndex int, fieldValues ...string) error
+	// UpdateForSavePolicy calls the update callback of other instances to synchronize their policy.
+	// It is called after Enforcer.SavePolicy()
+	UpdateForSavePolicy(model model.Model) error
+	// UpdateForAddPolicies calls the update callback of other instances to synchronize their policy.
+	// It is called after Enforcer.AddPolicies()
+	UpdateForAddPolicies(sec string, ptype string, rules ...[]string) error
+	// UpdateForRemovePolicies calls the update callback of other instances to synchronize their policy.
+	// It is called after Enforcer.RemovePolicies()
+	UpdateForRemovePolicies(sec string, ptype string, rules ...[]string) error
+}
diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go b/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go
new file mode 100644
index 0000000000..694123c463
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package persist
+
+// UpdatableWatcher is a strengthened Casbin watcher with policy-update callbacks.
+type UpdatableWatcher interface {
+	Watcher
+	// UpdateForUpdatePolicy calls the update callback of other instances to synchronize their policy.
+ // It is called after Enforcer.UpdatePolicies() + UpdateForUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go new file mode 100644 index 0000000000..dcaa37f764 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go @@ -0,0 +1,46 @@ +// Copyright 2023 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rbac + +import "context" + +// ContextRoleManager provides a context-aware interface to define the operations for managing roles. +// Prefer this over RoleManager interface for context propagation, which is useful for things like handling +// request timeouts. +type ContextRoleManager interface { + RoleManager + + // ClearCtx clears all stored data and resets the role manager to the initial state with context. + ClearCtx(ctx context.Context) error + // AddLinkCtx adds the inheritance link between two roles. role: name1 and role: name2 with context. + // domain is a prefix to the roles (can be used for other purposes). + AddLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error + // DeleteLinkCtx deletes the inheritance link between two roles. role: name1 and role: name2 with context. + // domain is a prefix to the roles (can be used for other purposes). + DeleteLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error + // HasLinkCtx determines whether a link exists between two roles. role: name1 inherits role: name2 with context. + // domain is a prefix to the roles (can be used for other purposes). + HasLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) (bool, error) + // GetRolesCtx gets the roles that a user inherits with context. + // domain is a prefix to the roles (can be used for other purposes). + GetRolesCtx(ctx context.Context, name string, domain ...string) ([]string, error) + // GetUsersCtx gets the users that inherits a role with context. + // domain is a prefix to the users (can be used for other purposes). + GetUsersCtx(ctx context.Context, name string, domain ...string) ([]string, error) + // GetDomainsCtx gets domains that a user has with context. + GetDomainsCtx(ctx context.Context, name string) ([]string, error) + // GetAllDomainsCtx gets all domains with context. + GetAllDomainsCtx(ctx context.Context) ([]string, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go new file mode 100644 index 0000000000..a6ae8693ce --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go @@ -0,0 +1,1014 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaultrolemanager + +import ( + "fmt" + "strings" + "sync" + + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/rbac" + "github.com/casbin/casbin/v2/util" +) + +const defaultDomain string = "" + +// Role represents the data structure for a role in RBAC. +type Role struct { + name string + roles *sync.Map + users *sync.Map + matched *sync.Map + matchedBy *sync.Map + linkConditionFuncMap *sync.Map + linkConditionFuncParamsMap *sync.Map +} + +func newRole(name string) *Role { + r := Role{} + r.name = name + r.roles = &sync.Map{} + r.users = &sync.Map{} + r.matched = &sync.Map{} + r.matchedBy = &sync.Map{} + r.linkConditionFuncMap = &sync.Map{} + r.linkConditionFuncParamsMap = &sync.Map{} + return &r +} + +func (r *Role) addRole(role *Role) { + r.roles.Store(role.name, role) + role.addUser(r) +} + +func (r *Role) removeRole(role *Role) { + r.roles.Delete(role.name) + role.removeUser(r) +} + +// should only be called inside addRole. +func (r *Role) addUser(user *Role) { + r.users.Store(user.name, user) +} + +// should only be called inside removeRole. +func (r *Role) removeUser(user *Role) { + r.users.Delete(user.name) +} + +func (r *Role) addMatch(role *Role) { + r.matched.Store(role.name, role) + role.matchedBy.Store(r.name, r) +} + +func (r *Role) removeMatch(role *Role) { + r.matched.Delete(role.name) + role.matchedBy.Delete(r.name) +} + +func (r *Role) removeMatches() { + r.matched.Range(func(key, value interface{}) bool { + r.removeMatch(value.(*Role)) + return true + }) + r.matchedBy.Range(func(key, value interface{}) bool { + value.(*Role).removeMatch(r) + return true + }) +} + +func (r *Role) rangeRoles(fn func(key, value interface{}) bool) { + r.roles.Range(fn) + r.roles.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.matched.Range(fn) + return true + }) + r.matchedBy.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.roles.Range(fn) + return true + }) +} + +func (r *Role) rangeUsers(fn func(key, value interface{}) bool) { + r.users.Range(fn) + r.users.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.matched.Range(fn) + return true + }) + r.matchedBy.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.users.Range(fn) + return true + }) +} + +func (r *Role) toString() string { + roles := r.getRoles() + + if len(roles) == 0 { + return "" + } + + var sb strings.Builder + sb.WriteString(r.name) + sb.WriteString(" < ") + if len(roles) != 1 { + sb.WriteString("(") + } + + for i, role := range roles { + if i == 0 { + sb.WriteString(role) + } else { + sb.WriteString(", ") + sb.WriteString(role) + } + } + + if len(roles) != 1 { + sb.WriteString(")") + } + + return sb.String() +} + +func (r *Role) getRoles() []string { + var names []string + r.rangeRoles(func(key, value interface{}) bool { + names = append(names, key.(string)) + return true + }) + return util.RemoveDuplicateElement(names) +} + +func (r *Role) getUsers() 
[]string { + var names []string + r.rangeUsers(func(key, value interface{}) bool { + names = append(names, key.(string)) + return true + }) + return names +} + +type linkConditionFuncKey struct { + roleName string + domainName string +} + +func (r *Role) addLinkConditionFunc(role *Role, domain string, fn rbac.LinkConditionFunc) { + r.linkConditionFuncMap.Store(linkConditionFuncKey{role.name, domain}, fn) +} + +func (r *Role) getLinkConditionFunc(role *Role, domain string) (rbac.LinkConditionFunc, bool) { + fn, ok := r.linkConditionFuncMap.Load(linkConditionFuncKey{role.name, domain}) + if fn == nil { + return nil, ok + } + return fn.(rbac.LinkConditionFunc), ok +} + +func (r *Role) setLinkConditionFuncParams(role *Role, domain string, params ...string) { + r.linkConditionFuncParamsMap.Store(linkConditionFuncKey{role.name, domain}, params) +} + +func (r *Role) getLinkConditionFuncParams(role *Role, domain string) ([]string, bool) { + params, ok := r.linkConditionFuncParamsMap.Load(linkConditionFuncKey{role.name, domain}) + if params == nil { + return nil, ok + } + return params.([]string), ok +} + +// RoleManagerImpl provides a default implementation for the RoleManager interface. +type RoleManagerImpl struct { + allRoles *sync.Map + maxHierarchyLevel int + matchingFunc rbac.MatchingFunc + domainMatchingFunc rbac.MatchingFunc + logger log.Logger + matchingFuncCache *util.SyncLRUCache +} + +// NewRoleManagerImpl is the constructor for creating an instance of the +// default RoleManager implementation. +func NewRoleManagerImpl(maxHierarchyLevel int) *RoleManagerImpl { + rm := RoleManagerImpl{} + _ = rm.Clear() // init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +// use this constructor to avoid rebuild of AddMatchingFunc. +func newRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *RoleManagerImpl { + rm := NewRoleManagerImpl(maxHierarchyLevel) + rm.matchingFunc = fn + return rm +} + +// rebuilds role cache. +func (rm *RoleManagerImpl) rebuild() { + roles := rm.allRoles + _ = rm.Clear() + rangeLinks(roles, func(name1, name2 string, domain ...string) bool { + _ = rm.AddLink(name1, name2, domain...) + return true + }) +} + +func (rm *RoleManagerImpl) Match(str string, pattern string) bool { + if str == pattern { + return true + } + + if rm.matchingFunc != nil { + return rm.matchingFunc(str, pattern) + } else { + return false + } +} + +func (rm *RoleManagerImpl) rangeMatchingRoles(name string, isPattern bool, fn func(role *Role) bool) { + rm.allRoles.Range(func(key, value interface{}) bool { + name2 := key.(string) + if isPattern && name != name2 && rm.Match(name2, name) { + fn(value.(*Role)) + } else if !isPattern && name != name2 && rm.Match(name, name2) { + fn(value.(*Role)) + } + return true + }) +} + +func (rm *RoleManagerImpl) load(name interface{}) (value *Role, ok bool) { + if r, ok := rm.allRoles.Load(name); ok { + return r.(*Role), true + } + return nil, false +} + +// loads or creates a role. 
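+// The second return value reports whether the role was created by this call;
+// callers use it to remove roles that were only materialized for a single lookup.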
+func (rm *RoleManagerImpl) getRole(name string) (r *Role, created bool) { + var role *Role + var ok bool + + if role, ok = rm.load(name); !ok { + role = newRole(name) + rm.allRoles.Store(name, role) + + if rm.matchingFunc != nil { + rm.rangeMatchingRoles(name, false, func(r *Role) bool { + r.addMatch(role) + return true + }) + + rm.rangeMatchingRoles(name, true, func(r *Role) bool { + role.addMatch(r) + return true + }) + } + } + + return role, !ok +} + +func loadAndDelete(m *sync.Map, name string) (value interface{}, loaded bool) { + value, loaded = m.Load(name) + if loaded { + m.Delete(name) + } + return value, loaded +} + +func (rm *RoleManagerImpl) removeRole(name string) { + if role, ok := loadAndDelete(rm.allRoles, name); ok { + role.(*Role).removeMatches() + } +} + +// AddMatchingFunc support use pattern in g. +func (rm *RoleManagerImpl) AddMatchingFunc(name string, fn rbac.MatchingFunc) { + rm.matchingFunc = fn + rm.rebuild() +} + +// AddDomainMatchingFunc support use domain pattern in g. +func (rm *RoleManagerImpl) AddDomainMatchingFunc(name string, fn rbac.MatchingFunc) { + rm.domainMatchingFunc = fn +} + +// SetLogger sets role manager's logger. +func (rm *RoleManagerImpl) SetLogger(logger log.Logger) { + rm.logger = logger +} + +// Clear clears all stored data and resets the role manager to the initial state. +func (rm *RoleManagerImpl) Clear() error { + rm.matchingFuncCache = util.NewSyncLRUCache(100) + rm.allRoles = &sync.Map{} + return nil +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (rm *RoleManagerImpl) AddLink(name1 string, name2 string, domains ...string) error { + user, _ := rm.getRole(name1) + role, _ := rm.getRole(name2) + user.addRole(role) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (rm *RoleManagerImpl) DeleteLink(name1 string, name2 string, domains ...string) error { + user, _ := rm.getRole(name1) + role, _ := rm.getRole(name2) + user.removeRole(role) + return nil +} + +// HasLink determines whether role: name1 inherits role: name2. +func (rm *RoleManagerImpl) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + if name1 == name2 || (rm.matchingFunc != nil && rm.Match(name1, name2)) { + return true, nil + } + + user, userCreated := rm.getRole(name1) + role, roleCreated := rm.getRole(name2) + + if userCreated { + defer rm.removeRole(user.name) + } + if roleCreated { + defer rm.removeRole(role.name) + } + + return rm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, rm.maxHierarchyLevel), nil +} + +func (rm *RoleManagerImpl) hasLinkHelper(targetName string, roles map[string]*Role, level int) bool { + if level < 0 || len(roles) == 0 { + return false + } + + nextRoles := map[string]*Role{} + for _, role := range roles { + if targetName == role.name || (rm.matchingFunc != nil && rm.Match(role.name, targetName)) { + return true + } + role.rangeRoles(func(key, value interface{}) bool { + nextRoles[key.(string)] = value.(*Role) + return true + }) + } + + return rm.hasLinkHelper(targetName, nextRoles, level-1) +} + +// GetRoles gets the roles that a user inherits. +func (rm *RoleManagerImpl) GetRoles(name string, domains ...string) ([]string, error) { + user, created := rm.getRole(name) + if created { + defer rm.removeRole(user.name) + } + return user.getRoles(), nil +} + +// GetUsers gets the users of a role. 
+// domain is an unreferenced parameter here, may be used in other implementations. +func (rm *RoleManagerImpl) GetUsers(name string, domain ...string) ([]string, error) { + role, created := rm.getRole(name) + if created { + defer rm.removeRole(role.name) + } + return role.getUsers(), nil +} + +func (rm *RoleManagerImpl) toString() []string { + var roles []string + + rm.allRoles.Range(func(key, value interface{}) bool { + role := value.(*Role) + if text := role.toString(); text != "" { + roles = append(roles, text) + } + return true + }) + + return roles +} + +// PrintRoles prints all the roles to log. +func (rm *RoleManagerImpl) PrintRoles() error { + if !(rm.logger).IsEnabled() { + return nil + } + roles := rm.toString() + rm.logger.LogRole(roles) + return nil +} + +// GetDomains gets domains that a user has. +func (rm *RoleManagerImpl) GetDomains(name string) ([]string, error) { + domains := []string{defaultDomain} + return domains, nil +} + +// GetAllDomains gets all domains. +func (rm *RoleManagerImpl) GetAllDomains() ([]string, error) { + domains := []string{defaultDomain} + return domains, nil +} + +func (rm *RoleManagerImpl) copyFrom(other *RoleManagerImpl) { + other.Range(func(name1, name2 string, domain ...string) bool { + _ = rm.AddLink(name1, name2, domain...) + return true + }) +} + +func rangeLinks(users *sync.Map, fn func(name1, name2 string, domain ...string) bool) { + users.Range(func(_, value interface{}) bool { + user := value.(*Role) + user.roles.Range(func(key, _ interface{}) bool { + roleName := key.(string) + return fn(user.name, roleName, defaultDomain) + }) + return true + }) +} + +func (rm *RoleManagerImpl) Range(fn func(name1, name2 string, domain ...string) bool) { + rangeLinks(rm.allRoles, fn) +} + +// Deprecated: BuildRelationship is no longer required. +func (rm *RoleManagerImpl) BuildRelationship(name1 string, name2 string, domain ...string) error { + return nil +} + +type DomainManager struct { + rmMap *sync.Map + maxHierarchyLevel int + matchingFunc rbac.MatchingFunc + domainMatchingFunc rbac.MatchingFunc + logger log.Logger + matchingFuncCache *util.SyncLRUCache +} + +// NewDomainManager is the constructor for creating an instance of the +// default DomainManager implementation. +func NewDomainManager(maxHierarchyLevel int) *DomainManager { + dm := &DomainManager{} + _ = dm.Clear() // init rmMap and rmCache + dm.maxHierarchyLevel = maxHierarchyLevel + return dm +} + +// SetLogger sets role manager's logger. +func (dm *DomainManager) SetLogger(logger log.Logger) { + dm.logger = logger +} + +// AddMatchingFunc support use pattern in g. +func (dm *DomainManager) AddMatchingFunc(name string, fn rbac.MatchingFunc) { + dm.matchingFunc = fn + dm.rmMap.Range(func(key, value interface{}) bool { + value.(*RoleManagerImpl).AddMatchingFunc(name, fn) + return true + }) +} + +// AddDomainMatchingFunc support use domain pattern in g. +func (dm *DomainManager) AddDomainMatchingFunc(name string, fn rbac.MatchingFunc) { + dm.domainMatchingFunc = fn + dm.rmMap.Range(func(key, value interface{}) bool { + value.(*RoleManagerImpl).AddDomainMatchingFunc(name, fn) + return true + }) + dm.rebuild() +} + +// clears the map of RoleManagers. 
+func (dm *DomainManager) rebuild() { + rmMap := dm.rmMap + _ = dm.Clear() + rmMap.Range(func(key, value interface{}) bool { + domain := key.(string) + rm := value.(*RoleManagerImpl) + + rm.Range(func(name1, name2 string, _ ...string) bool { + _ = dm.AddLink(name1, name2, domain) + return true + }) + return true + }) +} + +// Clear clears all stored data and resets the role manager to the initial state. +func (dm *DomainManager) Clear() error { + dm.rmMap = &sync.Map{} + dm.matchingFuncCache = util.NewSyncLRUCache(100) + return nil +} + +func (dm *DomainManager) getDomain(domains ...string) (domain string, err error) { + switch len(domains) { + case 0: + return defaultDomain, nil + default: + return domains[0], nil + } +} + +func (dm *DomainManager) Match(str string, pattern string) bool { + if str == pattern { + return true + } + + if dm.domainMatchingFunc != nil { + return dm.domainMatchingFunc(str, pattern) + } else { + return false + } +} + +func (dm *DomainManager) rangeAffectedRoleManagers(domain string, fn func(rm *RoleManagerImpl)) { + if dm.domainMatchingFunc != nil { + dm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + if domain != domain2 && dm.Match(domain2, domain) { + fn(value.(*RoleManagerImpl)) + } + return true + }) + } +} + +func (dm *DomainManager) load(name interface{}) (value *RoleManagerImpl, ok bool) { + if r, ok := dm.rmMap.Load(name); ok { + return r.(*RoleManagerImpl), true + } + return nil, false +} + +// load or create a RoleManager instance of domain. +func (dm *DomainManager) getRoleManager(domain string, store bool) *RoleManagerImpl { + var rm *RoleManagerImpl + var ok bool + + if rm, ok = dm.load(domain); !ok { + rm = newRoleManagerWithMatchingFunc(dm.maxHierarchyLevel, dm.matchingFunc) + if store { + dm.rmMap.Store(domain, rm) + } + if dm.domainMatchingFunc != nil { + dm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + rm2 := value.(*RoleManagerImpl) + if domain != domain2 && dm.Match(domain, domain2) { + rm.copyFrom(rm2) + } + return true + }) + } + } + return rm +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (dm *DomainManager) AddLink(name1 string, name2 string, domains ...string) error { + domain, err := dm.getDomain(domains...) + if err != nil { + return err + } + roleManager := dm.getRoleManager(domain, true) // create role manager if it does not exist + _ = roleManager.AddLink(name1, name2, domains...) + + dm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.AddLink(name1, name2, domains...) + }) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (dm *DomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { + domain, err := dm.getDomain(domains...) + if err != nil { + return err + } + roleManager := dm.getRoleManager(domain, true) // create role manager if it does not exist + _ = roleManager.DeleteLink(name1, name2, domains...) + + dm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.DeleteLink(name1, name2, domains...) + }) + return nil +} + +// HasLink determines whether role: name1 inherits role: name2. +func (dm *DomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + domain, err := dm.getDomain(domains...) 
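+	// an empty domains list resolves to the default domain ""; the link check is
+	// then delegated to that domain's role manager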
+ if err != nil { + return false, err + } + rm := dm.getRoleManager(domain, false) + return rm.HasLink(name1, name2, domains...) +} + +// GetRoles gets the roles that a subject inherits. +func (dm *DomainManager) GetRoles(name string, domains ...string) ([]string, error) { + domain, err := dm.getDomain(domains...) + if err != nil { + return nil, err + } + rm := dm.getRoleManager(domain, false) + return rm.GetRoles(name, domains...) +} + +// GetUsers gets the users of a role. +func (dm *DomainManager) GetUsers(name string, domains ...string) ([]string, error) { + domain, err := dm.getDomain(domains...) + if err != nil { + return nil, err + } + rm := dm.getRoleManager(domain, false) + return rm.GetUsers(name, domains...) +} + +func (dm *DomainManager) toString() []string { + var roles []string + + dm.rmMap.Range(func(key, value interface{}) bool { + domain := key.(string) + rm := value.(*RoleManagerImpl) + domainRoles := rm.toString() + roles = append(roles, fmt.Sprintf("%s: %s", domain, strings.Join(domainRoles, ", "))) + return true + }) + + return roles +} + +// PrintRoles prints all the roles to log. +func (dm *DomainManager) PrintRoles() error { + if !(dm.logger).IsEnabled() { + return nil + } + + roles := dm.toString() + dm.logger.LogRole(roles) + return nil +} + +// GetDomains gets domains that a user has. +func (dm *DomainManager) GetDomains(name string) ([]string, error) { + var domains []string + dm.rmMap.Range(func(key, value interface{}) bool { + domain := key.(string) + rm := value.(*RoleManagerImpl) + role, created := rm.getRole(name) + if created { + defer rm.removeRole(role.name) + } + if len(role.getUsers()) > 0 || len(role.getRoles()) > 0 { + domains = append(domains, domain) + } + return true + }) + return domains, nil +} + +// GetAllDomains gets all domains. +func (dm *DomainManager) GetAllDomains() ([]string, error) { + var domains []string + dm.rmMap.Range(func(key, value interface{}) bool { + domains = append(domains, key.(string)) + return true + }) + return domains, nil +} + +// Deprecated: BuildRelationship is no longer required. +func (dm *DomainManager) BuildRelationship(name1 string, name2 string, domain ...string) error { + return nil +} + +type RoleManager struct { + *DomainManager +} + +func NewRoleManager(maxHierarchyLevel int) *RoleManager { + rm := &RoleManager{} + rm.DomainManager = NewDomainManager(maxHierarchyLevel) + return rm +} + +type ConditionalRoleManager struct { + RoleManagerImpl +} + +func (crm *ConditionalRoleManager) copyFrom(other *ConditionalRoleManager) { + other.Range(func(name1, name2 string, domain ...string) bool { + _ = crm.AddLink(name1, name2, domain...) + return true + }) +} + +// use this constructor to avoid rebuild of AddMatchingFunc. +func newConditionalRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *ConditionalRoleManager { + rm := NewConditionalRoleManager(maxHierarchyLevel) + rm.matchingFunc = fn + return rm +} + +// NewConditionalRoleManager is the constructor for creating an instance of the +// ConditionalRoleManager implementation. +func NewConditionalRoleManager(maxHierarchyLevel int) *ConditionalRoleManager { + rm := ConditionalRoleManager{} + _ = rm.Clear() // init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +// HasLink determines whether role: name1 inherits role: name2. 
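+// Unlike RoleManagerImpl.HasLink, every link traversed here must also pass its
+// registered LinkConditionFunc (if one exists); see getNextRoles below.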
+func (crm *ConditionalRoleManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + if name1 == name2 || (crm.matchingFunc != nil && crm.Match(name1, name2)) { + return true, nil + } + + user, userCreated := crm.getRole(name1) + role, roleCreated := crm.getRole(name2) + + if userCreated { + defer crm.removeRole(user.name) + } + if roleCreated { + defer crm.removeRole(role.name) + } + + return crm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, crm.maxHierarchyLevel, domains...), nil +} + +// hasLinkHelper use the Breadth First Search algorithm to traverse the Role tree +// Judging whether the user has a role (has link) is to judge whether the role node can be reached from the user node. +func (crm *ConditionalRoleManager) hasLinkHelper(targetName string, roles map[string]*Role, level int, domains ...string) bool { + if level < 0 || len(roles) == 0 { + return false + } + nextRoles := map[string]*Role{} + for _, role := range roles { + if targetName == role.name || (crm.matchingFunc != nil && crm.Match(role.name, targetName)) { + return true + } + role.rangeRoles(func(key, value interface{}) bool { + nextRole := value.(*Role) + return crm.getNextRoles(role, nextRole, domains, nextRoles) + }) + } + + return crm.hasLinkHelper(targetName, nextRoles, level-1) +} + +func (crm *ConditionalRoleManager) getNextRoles(currentRole, nextRole *Role, domains []string, nextRoles map[string]*Role) bool { + passLinkConditionFunc := true + var err error + // If LinkConditionFunc exists, it needs to pass the verification to get nextRole + if len(domains) == 0 { + if linkConditionFunc, existLinkCondition := crm.GetLinkConditionFunc(currentRole.name, nextRole.name); existLinkCondition { + params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name) + passLinkConditionFunc, err = linkConditionFunc(params...) + } + } else { + if linkConditionFunc, existLinkCondition := crm.GetDomainLinkConditionFunc(currentRole.name, nextRole.name, domains[0]); existLinkCondition { + params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name, domains[0]) + passLinkConditionFunc, err = linkConditionFunc(params...) + } + } + + if err != nil { + crm.logger.LogError(err, "hasLinkHelper LinkCondition Error") + return false + } + + if passLinkConditionFunc { + nextRoles[nextRole.name] = nextRole + } + + return true +} + +// GetLinkConditionFunc get LinkConditionFunc based on userName, roleName. +func (crm *ConditionalRoleManager) GetLinkConditionFunc(userName, roleName string) (rbac.LinkConditionFunc, bool) { + return crm.GetDomainLinkConditionFunc(userName, roleName, defaultDomain) +} + +// GetDomainLinkConditionFunc get LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) GetDomainLinkConditionFunc(userName, roleName, domain string) (rbac.LinkConditionFunc, bool) { + user, userCreated := crm.getRole(userName) + role, roleCreated := crm.getRole(roleName) + + if userCreated { + crm.removeRole(user.name) + return nil, false + } + + if roleCreated { + crm.removeRole(role.name) + return nil, false + } + + return user.getLinkConditionFunc(role, domain) +} + +// GetLinkConditionFuncParams gets parameters of LinkConditionFunc based on userName, roleName, domain. 
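+// If either end of the link has not been materialized yet, the temporarily
+// created role is discarded again and the lookup reports false.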
+func (crm *ConditionalRoleManager) GetLinkConditionFuncParams(userName, roleName string, domain ...string) ([]string, bool) { + user, userCreated := crm.getRole(userName) + role, roleCreated := crm.getRole(roleName) + + if userCreated { + crm.removeRole(user.name) + return nil, false + } + + if roleCreated { + crm.removeRole(role.name) + return nil, false + } + + domainName := defaultDomain + if len(domain) != 0 { + domainName = domain[0] + } + + if params, ok := user.getLinkConditionFuncParams(role, domainName); ok { + return params, true + } else { + return nil, false + } +} + +// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc. +func (crm *ConditionalRoleManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { + crm.AddDomainLinkConditionFunc(userName, roleName, defaultDomain, fn) +} + +// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc. +func (crm *ConditionalRoleManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { + user, _ := crm.getRole(userName) + role, _ := crm.getRole(roleName) + + user.addLinkConditionFunc(role, domain, fn) +} + +// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { + crm.SetDomainLinkConditionFuncParams(userName, roleName, defaultDomain, params...) +} + +// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { + user, _ := crm.getRole(userName) + role, _ := crm.getRole(roleName) + + user.setLinkConditionFuncParams(role, domain, params...) +} + +type ConditionalDomainManager struct { + ConditionalRoleManager + DomainManager +} + +// NewConditionalDomainManager is the constructor for creating an instance of the +// ConditionalDomainManager implementation. +func NewConditionalDomainManager(maxHierarchyLevel int) *ConditionalDomainManager { + rm := ConditionalDomainManager{} + _ = rm.Clear() // init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +func (cdm *ConditionalDomainManager) load(name interface{}) (value *ConditionalRoleManager, ok bool) { + if r, ok := cdm.rmMap.Load(name); ok { + return r.(*ConditionalRoleManager), true + } + return nil, false +} + +// load or create a ConditionalRoleManager instance of domain. +func (cdm *ConditionalDomainManager) getConditionalRoleManager(domain string, store bool) *ConditionalRoleManager { + var rm *ConditionalRoleManager + var ok bool + + if rm, ok = cdm.load(domain); !ok { + rm = newConditionalRoleManagerWithMatchingFunc(cdm.maxHierarchyLevel, cdm.matchingFunc) + if store { + cdm.rmMap.Store(domain, rm) + } + if cdm.domainMatchingFunc != nil { + cdm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + rm2 := value.(*ConditionalRoleManager) + if domain != domain2 && cdm.Match(domain, domain2) { + rm.copyFrom(rm2) + } + return true + }) + } + } + return rm +} + +// HasLink determines whether role: name1 inherits role: name2. +func (cdm *ConditionalDomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + domain, err := cdm.getDomain(domains...) 
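+	// resolve the domain first; the conditional role manager for that domain then
+	// performs the condition-gated link check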
+ if err != nil { + return false, err + } + rm := cdm.getConditionalRoleManager(domain, false) + return rm.HasLink(name1, name2, domains...) +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (cdm *ConditionalDomainManager) AddLink(name1 string, name2 string, domains ...string) error { + domain, err := cdm.getDomain(domains...) + if err != nil { + return err + } + conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) // create role manager if it does not exist + _ = conditionalRoleManager.AddLink(name1, name2, domain) + + cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.AddLink(name1, name2, domain) + }) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (cdm *ConditionalDomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { + domain, err := cdm.getDomain(domains...) + if err != nil { + return err + } + conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) // create role manager if it does not exist + _ = conditionalRoleManager.DeleteLink(name1, name2, domain) + + cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.DeleteLink(name1, name2, domain) + }) + return nil +} + +// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc. +func (cdm *ConditionalDomainManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).AddLinkConditionFunc(userName, roleName, fn) + return true + }) +} + +// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc. +func (cdm *ConditionalDomainManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).AddDomainLinkConditionFunc(userName, roleName, domain, fn) + return true + }) +} + +// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName. +func (cdm *ConditionalDomainManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).SetLinkConditionFuncParams(userName, roleName, params...) + return true + }) +} + +// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. +func (cdm *ConditionalDomainManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).SetDomainLinkConditionFuncParams(userName, roleName, domain, params...) + return true + }) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go new file mode 100644 index 0000000000..28b40a352c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go @@ -0,0 +1,76 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rbac
+
+import "github.com/casbin/casbin/v2/log"
+
+type MatchingFunc func(arg1 string, arg2 string) bool
+
+type LinkConditionFunc = func(args ...string) (bool, error)
+
+// RoleManager provides an interface to define the operations for managing roles.
+type RoleManager interface {
+	// Clear clears all stored data and resets the role manager to the initial state.
+	Clear() error
+	// AddLink adds the inheritance link between two roles. role: name1 and role: name2.
+	// domain is a prefix to the roles (can be used for other purposes).
+	AddLink(name1 string, name2 string, domain ...string) error
+	// Deprecated: BuildRelationship is no longer required.
+	BuildRelationship(name1 string, name2 string, domain ...string) error
+	// DeleteLink deletes the inheritance link between two roles. role: name1 and role: name2.
+	// domain is a prefix to the roles (can be used for other purposes).
+	DeleteLink(name1 string, name2 string, domain ...string) error
+	// HasLink determines whether a link exists between two roles. role: name1 inherits role: name2.
+	// domain is a prefix to the roles (can be used for other purposes).
+	HasLink(name1 string, name2 string, domain ...string) (bool, error)
+	// GetRoles gets the roles that a user inherits.
+	// domain is a prefix to the roles (can be used for other purposes).
+	GetRoles(name string, domain ...string) ([]string, error)
+	// GetUsers gets the users that inherit a role.
+	// domain is a prefix to the users (can be used for other purposes).
+	GetUsers(name string, domain ...string) ([]string, error)
+	// GetDomains gets the domains that a user has.
+	GetDomains(name string) ([]string, error)
+	// GetAllDomains gets all domains.
+	GetAllDomains() ([]string, error)
+	// PrintRoles prints all the roles to log.
+	PrintRoles() error
+	// SetLogger sets the role manager's logger.
+	SetLogger(logger log.Logger)
+	// Match matches the domain with the pattern.
+	Match(str string, pattern string) bool
+	// AddMatchingFunc adds the matching function.
+	AddMatchingFunc(name string, fn MatchingFunc)
+	// AddDomainMatchingFunc adds the domain matching function.
+	AddDomainMatchingFunc(name string, fn MatchingFunc)
+}
+
+// ConditionalRoleManager provides an interface to define the operations for managing roles.
+// Link with conditions is supported.
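+//
+// Illustrative sketch (not part of the upstream file), using the default
+// implementation from rbac/default-role-manager:
+//
+//	crm := defaultrolemanager.NewConditionalRoleManager(10)
+//	_ = crm.AddLink("alice", "admin")
+//	crm.AddLinkConditionFunc("alice", "admin", func(params ...string) (bool, error) {
+//		return len(params) > 0 && params[0] == "enabled", nil
+//	})
+//	crm.SetLinkConditionFuncParams("alice", "admin", "enabled")
+//	ok, _ := crm.HasLink("alice", "admin") // true only while the condition passes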
+type ConditionalRoleManager interface { + RoleManager + + // AddLinkConditionFunc Add condition function fn for Link userName->roleName, + // when fn returns true, Link is valid, otherwise invalid + AddLinkConditionFunc(userName, roleName string, fn LinkConditionFunc) + // SetLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName + SetLinkConditionFuncParams(userName, roleName string, params ...string) + // AddDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, + // when fn returns true, Link is valid, otherwise invalid + AddDomainLinkConditionFunc(user string, role string, domain string, fn LinkConditionFunc) + // SetDomainLinkConditionFuncParams Sets the parameters of the condition function fn + // for Link userName->{roleName, domain} + SetDomainLinkConditionFuncParams(user string, role string, domain string, params ...string) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api.go b/vendor/github.com/casbin/casbin/v2/rbac_api.go new file mode 100644 index 0000000000..fe2a6e7a09 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api.go @@ -0,0 +1,644 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "fmt" + "strings" + + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/errors" + "github.com/casbin/casbin/v2/util" +) + +// GetRolesForUser gets the roles that a user has. +func (e *Enforcer) GetRolesForUser(name string, domain ...string) ([]string, error) { + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + res, err := rm.GetRoles(name, domain...) + return res, err +} + +// GetUsersForRole gets the users that has a role. +func (e *Enforcer) GetUsersForRole(name string, domain ...string) ([]string, error) { + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + res, err := rm.GetUsers(name, domain...) + return res, err +} + +// HasRoleForUser determines whether a user has a role. +func (e *Enforcer) HasRoleForUser(name string, role string, domain ...string) (bool, error) { + roles, err := e.GetRolesForUser(name, domain...) + if err != nil { + return false, err + } + hasRole := false + for _, r := range roles { + if r == role { + hasRole = true + break + } + } + + return hasRole, nil +} + +// AddRoleForUser adds a role for a user. +// Returns false if the user already has the role (aka not affected). +func (e *Enforcer) AddRoleForUser(user string, role string, domain ...string) (bool, error) { + args := []string{user, role} + args = append(args, domain...) + return e.AddGroupingPolicy(args) +} + +// AddRolesForUser adds roles for a user. +// Returns false if the user already has the roles (aka not affected). 
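+// Each role becomes one "g" rule, so (for example) AddRolesForUser("alice",
+// []string{"role:admin", "role:auditor"}) adds g, alice, role:admin and
+// g, alice, role:auditor in a single batch.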
+func (e *Enforcer) AddRolesForUser(user string, roles []string, domain ...string) (bool, error) { + var rules [][]string + for _, role := range roles { + rule := []string{user, role} + rule = append(rule, domain...) + rules = append(rules, rule) + } + return e.AddGroupingPolicies(rules) +} + +// DeleteRoleForUser deletes a role for a user. +// Returns false if the user does not have the role (aka not affected). +func (e *Enforcer) DeleteRoleForUser(user string, role string, domain ...string) (bool, error) { + args := []string{user, role} + args = append(args, domain...) + return e.RemoveGroupingPolicy(args) +} + +// DeleteRolesForUser deletes all roles for a user. +// Returns false if the user does not have any roles (aka not affected). +func (e *Enforcer) DeleteRolesForUser(user string, domain ...string) (bool, error) { + var args []string + if len(domain) == 0 { + args = []string{user} + } else if len(domain) > 1 { + return false, errors.ErrDomainParameter + } else { + args = []string{user, "", domain[0]} + } + return e.RemoveFilteredGroupingPolicy(0, args...) +} + +// DeleteUser deletes a user. +// Returns false if the user does not exist (aka not affected). +func (e *Enforcer) DeleteUser(user string) (bool, error) { + var err error + res1, err := e.RemoveFilteredGroupingPolicy(0, user) + if err != nil { + return res1, err + } + + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + return false, err + } + res2, err := e.RemoveFilteredPolicy(subIndex, user) + return res1 || res2, err +} + +// DeleteRole deletes a role. +// Returns false if the role does not exist (aka not affected). +func (e *Enforcer) DeleteRole(role string) (bool, error) { + var err error + res1, err := e.RemoveFilteredGroupingPolicy(0, role) + if err != nil { + return res1, err + } + + res2, err := e.RemoveFilteredGroupingPolicy(1, role) + if err != nil { + return res1, err + } + + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + return false, err + } + res3, err := e.RemoveFilteredPolicy(subIndex, role) + return res1 || res2 || res3, err +} + +// DeletePermission deletes a permission. +// Returns false if the permission does not exist (aka not affected). +func (e *Enforcer) DeletePermission(permission ...string) (bool, error) { + return e.RemoveFilteredPolicy(1, permission...) +} + +// AddPermissionForUser adds a permission for a user or role. +// Returns false if the user or role already has the permission (aka not affected). +func (e *Enforcer) AddPermissionForUser(user string, permission ...string) (bool, error) { + return e.AddPolicy(util.JoinSlice(user, permission...)) +} + +// AddPermissionsForUser adds multiple permissions for a user or role. +// Returns false if the user or role already has one of the permissions (aka not affected). +func (e *Enforcer) AddPermissionsForUser(user string, permissions ...[]string) (bool, error) { + var rules [][]string + for _, permission := range permissions { + rules = append(rules, util.JoinSlice(user, permission...)) + } + return e.AddPolicies(rules) +} + +// DeletePermissionForUser deletes a permission for a user or role. +// Returns false if the user or role does not have the permission (aka not affected). +func (e *Enforcer) DeletePermissionForUser(user string, permission ...string) (bool, error) { + return e.RemovePolicy(util.JoinSlice(user, permission...)) +} + +// DeletePermissionsForUser deletes permissions for a user or role. 
+// Returns false if the user or role does not have any permissions (aka not affected). +func (e *Enforcer) DeletePermissionsForUser(user string) (bool, error) { + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + return false, err + } + return e.RemoveFilteredPolicy(subIndex, user) +} + +// GetPermissionsForUser gets permissions for a user or role. +func (e *Enforcer) GetPermissionsForUser(user string, domain ...string) ([][]string, error) { + return e.GetNamedPermissionsForUser("p", user, domain...) +} + +// GetNamedPermissionsForUser gets permissions for a user or role by named policy. +func (e *Enforcer) GetNamedPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { + permission := make([][]string, 0) + for pType, assertion := range e.model["p"] { + if pType != ptype { + continue + } + args := make([]string, len(assertion.Tokens)) + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + subIndex = 0 + } + args[subIndex] = user + + if len(domain) > 0 { + var index int + index, err = e.GetFieldIndex(ptype, constant.DomainIndex) + if err != nil { + return permission, err + } + args[index] = domain[0] + } + perm, err := e.GetFilteredNamedPolicy(ptype, 0, args...) + if err != nil { + return permission, err + } + permission = append(permission, perm...) + } + return permission, nil +} + +// HasPermissionForUser determines whether a user has a permission. +func (e *Enforcer) HasPermissionForUser(user string, permission ...string) (bool, error) { + return e.HasPolicy(util.JoinSlice(user, permission...)) +} + +// GetImplicitRolesForUser gets implicit roles that a user has. +// Compared to GetRolesForUser(), this function retrieves indirect roles besides direct roles. +// For example: +// g, alice, role:admin +// g, role:admin, role:user +// +// GetRolesForUser("alice") can only get: ["role:admin"]. +// But GetImplicitRolesForUser("alice") will get: ["role:admin", "role:user"]. +func (e *Enforcer) GetImplicitRolesForUser(name string, domain ...string) ([]string, error) { + var res []string + + for v := range e.rmMap { + roles, err := e.GetNamedImplicitRolesForUser(v, name, domain...) + if err != nil { + return nil, err + } + res = append(res, roles...) + } + + return res, nil +} + +// GetNamedImplicitRolesForUser gets implicit roles that a user has by named role definition. +// Compared to GetImplicitRolesForUser(), this function retrieves indirect roles besides direct roles. +// For example: +// g, alice, role:admin +// g, role:admin, role:user +// g2, alice, role:admin2 +// +// GetImplicitRolesForUser("alice") can only get: ["role:admin", "role:user"]. +// But GetNamedImplicitRolesForUser("g2", "alice") will get: ["role:admin2"]. +func (e *Enforcer) GetNamedImplicitRolesForUser(ptype string, name string, domain ...string) ([]string, error) { + var res []string + + rm := e.GetNamedRoleManager(ptype) + if rm == nil { + return nil, fmt.Errorf("role manager %s is not initialized", ptype) + } + roleSet := make(map[string]bool) + roleSet[name] = true + q := make([]string, 0) + q = append(q, name) + + for len(q) > 0 { + name := q[0] + q = q[1:] + + roles, err := rm.GetRoles(name, domain...) + if err != nil { + return nil, err + } + for _, r := range roles { + if _, ok := roleSet[r]; !ok { + res = append(res, r) + q = append(q, r) + roleSet[r] = true + } + } + } + + return res, nil +} + +// GetImplicitUsersForRole gets implicit users for a role. 
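+// Compared to GetUsersForRole(), indirect users are returned as well.
+// For example (illustrative policy):
+//
+//	g, alice, role:admin
+//	g, role:admin, role:user
+//
+// GetUsersForRole("role:user") can only get: ["role:admin"].
+// But GetImplicitUsersForRole("role:user") will get: ["role:admin", "alice"].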
+func (e *Enforcer) GetImplicitUsersForRole(name string, domain ...string) ([]string, error) { + res := []string{} + + for _, rm := range e.rmMap { + roleSet := make(map[string]bool) + roleSet[name] = true + q := make([]string, 0) + q = append(q, name) + + for len(q) > 0 { + name := q[0] + q = q[1:] + + roles, err := rm.GetUsers(name, domain...) + if err != nil && err.Error() != "error: name does not exist" { + return nil, err + } + for _, r := range roles { + if _, ok := roleSet[r]; !ok { + res = append(res, r) + q = append(q, r) + roleSet[r] = true + } + } + } + } + + return res, nil +} + +// GetImplicitPermissionsForUser gets implicit permissions for a user or role. +// Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p, alice, data2, read +// g, alice, admin +// +// GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]]. +// But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]]. +func (e *Enforcer) GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) { + return e.GetNamedImplicitPermissionsForUser("p", "g", user, domain...) +} + +// GetNamedImplicitPermissionsForUser gets implicit permissions for a user or role by named policy. +// Compared to GetNamedPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p2, admin, create +// g, alice, admin +// +// GetImplicitPermissionsForUser("alice") can only get: [["admin", "data1", "read"]], whose policy is default policy "p" +// But you can specify the named policy "p2" to get: [["admin", "create"]] by GetNamedImplicitPermissionsForUser("p2","alice"). +func (e *Enforcer) GetNamedImplicitPermissionsForUser(ptype string, gtype string, user string, domain ...string) ([][]string, error) { + permission := make([][]string, 0) + rm := e.GetNamedRoleManager(gtype) + if rm == nil { + return nil, fmt.Errorf("role manager %s is not initialized", gtype) + } + + roles, err := e.GetNamedImplicitRolesForUser(gtype, user, domain...) + if err != nil { + return nil, err + } + policyRoles := make(map[string]struct{}, len(roles)+1) + policyRoles[user] = struct{}{} + for _, r := range roles { + policyRoles[r] = struct{}{} + } + + domainIndex, err := e.GetFieldIndex(ptype, constant.DomainIndex) + for _, rule := range e.model["p"][ptype].Policy { + if len(domain) == 0 { + if _, ok := policyRoles[rule[0]]; ok { + permission = append(permission, deepCopyPolicy(rule)) + } + continue + } + if len(domain) > 1 { + return nil, errors.ErrDomainParameter + } + if err != nil { + return nil, err + } + d := domain[0] + matched := rm.Match(d, rule[domainIndex]) + if !matched { + continue + } + if _, ok := policyRoles[rule[0]]; ok { + newRule := deepCopyPolicy(rule) + newRule[domainIndex] = d + permission = append(permission, newRule) + } + } + return permission, nil +} + +// GetImplicitUsersForPermission gets implicit users for a permission. +// For example: +// p, admin, data1, read +// p, bob, data1, read +// g, alice, admin +// +// GetImplicitUsersForPermission("data1", "read") will get: ["alice", "bob"]. +// Note: only users will be returned, roles (2nd arg in "g") will be excluded. 
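+//
+// Usage sketch (illustrative): users, err := e.GetImplicitUsersForPermission("data1", "read")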
+func (e *Enforcer) GetImplicitUsersForPermission(permission ...string) ([]string, error) { + pSubjects, err := e.GetAllSubjects() + if err != nil { + return nil, err + } + gInherit, err := e.model.GetValuesForFieldInPolicyAllTypes("g", 1) + if err != nil { + return nil, err + } + gSubjects, err := e.model.GetValuesForFieldInPolicyAllTypes("g", 0) + if err != nil { + return nil, err + } + + subjects := append(pSubjects, gSubjects...) + util.ArrayRemoveDuplicates(&subjects) + + subjects = util.SetSubtract(subjects, gInherit) + + res := []string{} + for _, user := range subjects { + req := util.JoinSliceAny(user, permission...) + allowed, err := e.Enforce(req...) + if err != nil { + return nil, err + } + + if allowed { + res = append(res, user) + } + } + + return res, nil +} + +// GetDomainsForUser gets all domains. +func (e *Enforcer) GetDomainsForUser(user string) ([]string, error) { + var domains []string + for _, rm := range e.rmMap { + domain, err := rm.GetDomains(user) + if err != nil { + return nil, err + } + domains = append(domains, domain...) + } + return domains, nil +} + +// GetImplicitResourcesForUser returns all policies that user obtaining in domain. +func (e *Enforcer) GetImplicitResourcesForUser(user string, domain ...string) ([][]string, error) { + permissions, err := e.GetImplicitPermissionsForUser(user, domain...) + if err != nil { + return nil, err + } + res := make([][]string, 0) + for _, permission := range permissions { + if permission[0] == user { + res = append(res, permission) + continue + } + resLocal := [][]string{{user}} + tokensLength := len(permission) + t := make([][]string, 1, tokensLength) + for _, token := range permission[1:] { + tokens, err := e.GetImplicitUsersForRole(token, domain...) + if err != nil { + return nil, err + } + tokens = append(tokens, token) + t = append(t, tokens) + } + for i := 1; i < tokensLength; i++ { + n := make([][]string, 0) + for _, tokens := range t[i] { + for _, policy := range resLocal { + t := append([]string(nil), policy...) + t = append(t, tokens) + n = append(n, t) + } + } + resLocal = n + } + res = append(res, resLocal...) + } + return res, nil +} + +// deepCopyPolicy returns a deepcopy version of the policy to prevent changing policies through returned slice. +func deepCopyPolicy(src []string) []string { + newRule := make([]string, len(src)) + copy(newRule, src) + return newRule +} + +// GetAllowedObjectConditions returns a string array of object conditions that the user can access. +// For example: conditions, err := e.GetAllowedObjectConditions("alice", "read", "r.obj.") +// Note: +// +// 0. prefix: You can customize the prefix of the object conditions, and "r.obj." is commonly used as a prefix. +// After removing the prefix, the remaining part is the condition of the object. +// If there is an obj policy that does not meet the prefix requirement, an errors.ERR_OBJ_CONDITION will be returned. +// +// 1. If the 'objectConditions' array is empty, return errors.ERR_EMPTY_CONDITION +// This error is returned because some data adapters' ORM return full table data by default +// when they receive an empty condition, which tends to behave contrary to expectations.(e.g. GORM) +// If you are using an adapter that does not behave like this, you can choose to ignore this error. 
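+//
+// 2. Example (illustrative): with policies
+//
+//	p, alice, r.obj.price < 25, read
+//	p, alice, r.obj.category_id = 2, read
+//
+// GetAllowedObjectConditions("alice", "read", "r.obj.") returns
+// ["price < 25", "category_id = 2"].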
+func (e *Enforcer) GetAllowedObjectConditions(user string, action string, prefix string) ([]string, error) { + permissions, err := e.GetImplicitPermissionsForUser(user) + if err != nil { + return nil, err + } + + var objectConditions []string + for _, policy := range permissions { + // policy {sub, obj, act} + if policy[2] == action { + if !strings.HasPrefix(policy[1], prefix) { + return nil, errors.ErrObjCondition + } + objectConditions = append(objectConditions, strings.TrimPrefix(policy[1], prefix)) + } + } + + if len(objectConditions) == 0 { + return nil, errors.ErrEmptyCondition + } + + return objectConditions, nil +} + +// removeDuplicatePermissions Convert permissions to string as a hash to deduplicate. +func removeDuplicatePermissions(permissions [][]string) [][]string { + permissionsSet := make(map[string]bool) + res := make([][]string, 0) + for _, permission := range permissions { + permissionStr := util.ArrayToString(permission) + if permissionsSet[permissionStr] { + continue + } + permissionsSet[permissionStr] = true + res = append(res, permission) + } + return res +} + +// GetImplicitUsersForResource return implicit user based on resource. +// for example: +// p, alice, data1, read +// p, bob, data2, write +// p, data2_admin, data2, read +// p, data2_admin, data2, write +// g, alice, data2_admin +// GetImplicitUsersForResource("data2") will return [[bob data2 write] [alice data2 read] [alice data2 write]] +// GetImplicitUsersForResource("data1") will return [[alice data1 read]] +// Note: only users will be returned, roles (2nd arg in "g") will be excluded. +func (e *Enforcer) GetImplicitUsersForResource(resource string) ([][]string, error) { + permissions := make([][]string, 0) + subjectIndex, _ := e.GetFieldIndex("p", "sub") + objectIndex, _ := e.GetFieldIndex("p", "obj") + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + + isRole := make(map[string]bool) + roles, err := e.GetAllRoles() + if err != nil { + return nil, err + } + for _, role := range roles { + isRole[role] = true + } + + for _, rule := range e.model["p"]["p"].Policy { + obj := rule[objectIndex] + if obj != resource { + continue + } + + sub := rule[subjectIndex] + + if !isRole[sub] { + permissions = append(permissions, rule) + } else { + users, err := rm.GetUsers(sub) + if err != nil { + return nil, err + } + + for _, user := range users { + implicitUserRule := deepCopyPolicy(rule) + implicitUserRule[subjectIndex] = user + permissions = append(permissions, implicitUserRule) + } + } + } + + res := removeDuplicatePermissions(permissions) + return res, nil +} + +// GetImplicitUsersForResourceByDomain return implicit user based on resource and domain. +// Compared to GetImplicitUsersForResource, domain is supported. 
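+// For example (illustrative policy):
+//
+//	p, admin, domain1, data1, read
+//	g, alice, admin, domain1
+//
+// GetImplicitUsersForResourceByDomain("data1", "domain1") will get:
+// [["alice", "domain1", "data1", "read"]].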
+func (e *Enforcer) GetImplicitUsersForResourceByDomain(resource string, domain string) ([][]string, error) { + permissions := make([][]string, 0) + subjectIndex, _ := e.GetFieldIndex("p", "sub") + objectIndex, _ := e.GetFieldIndex("p", "obj") + domIndex, _ := e.GetFieldIndex("p", "dom") + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + + isRole := make(map[string]bool) + + if roles, err := e.GetAllRolesByDomain(domain); err != nil { + return nil, err + } else { + for _, role := range roles { + isRole[role] = true + } + } + + for _, rule := range e.model["p"]["p"].Policy { + obj := rule[objectIndex] + if obj != resource { + continue + } + + sub := rule[subjectIndex] + + if !isRole[sub] { + permissions = append(permissions, rule) + } else { + if domain != rule[domIndex] { + continue + } + users, err := rm.GetUsers(sub, domain) + if err != nil { + return nil, err + } + + for _, user := range users { + implicitUserRule := deepCopyPolicy(rule) + implicitUserRule[subjectIndex] = user + permissions = append(permissions, implicitUserRule) + } + } + } + + res := removeDuplicatePermissions(permissions) + return res, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go b/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go new file mode 100644 index 0000000000..7b10e565dd --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go @@ -0,0 +1,203 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +// GetRolesForUser gets the roles that a user has. +func (e *SyncedEnforcer) GetRolesForUser(name string, domain ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetRolesForUser(name, domain...) +} + +// GetUsersForRole gets the users that has a role. +func (e *SyncedEnforcer) GetUsersForRole(name string, domain ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetUsersForRole(name, domain...) +} + +// HasRoleForUser determines whether a user has a role. +func (e *SyncedEnforcer) HasRoleForUser(name string, role string, domain ...string) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasRoleForUser(name, role, domain...) +} + +// AddRoleForUser adds a role for a user. +// Returns false if the user already has the role (aka not affected). +func (e *SyncedEnforcer) AddRoleForUser(user string, role string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddRoleForUser(user, role, domain...) +} + +// AddRolesForUser adds roles for a user. +// Returns false if the user already has the roles (aka not affected). +func (e *SyncedEnforcer) AddRolesForUser(user string, roles []string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddRolesForUser(user, roles, domain...) +} + +// DeleteRoleForUser deletes a role for a user. 
+// Returns false if the user does not have the role (aka not affected). +func (e *SyncedEnforcer) DeleteRoleForUser(user string, role string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRoleForUser(user, role, domain...) +} + +// DeleteRolesForUser deletes all roles for a user. +// Returns false if the user does not have any roles (aka not affected). +func (e *SyncedEnforcer) DeleteRolesForUser(user string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRolesForUser(user, domain...) +} + +// DeleteUser deletes a user. +// Returns false if the user does not exist (aka not affected). +func (e *SyncedEnforcer) DeleteUser(user string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteUser(user) +} + +// DeleteRole deletes a role. +// Returns false if the role does not exist (aka not affected). +func (e *SyncedEnforcer) DeleteRole(role string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRole(role) +} + +// DeletePermission deletes a permission. +// Returns false if the permission does not exist (aka not affected). +func (e *SyncedEnforcer) DeletePermission(permission ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeletePermission(permission...) +} + +// AddPermissionForUser adds a permission for a user or role. +// Returns false if the user or role already has the permission (aka not affected). +func (e *SyncedEnforcer) AddPermissionForUser(user string, permission ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPermissionForUser(user, permission...) +} + +// AddPermissionsForUser adds permissions for a user or role. +// Returns false if the user or role already has the permissions (aka not affected). +func (e *SyncedEnforcer) AddPermissionsForUser(user string, permissions ...[]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPermissionsForUser(user, permissions...) +} + +// DeletePermissionForUser deletes a permission for a user or role. +// Returns false if the user or role does not have the permission (aka not affected). +func (e *SyncedEnforcer) DeletePermissionForUser(user string, permission ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeletePermissionForUser(user, permission...) +} + +// DeletePermissionsForUser deletes permissions for a user or role. +// Returns false if the user or role does not have any permissions (aka not affected). +func (e *SyncedEnforcer) DeletePermissionsForUser(user string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeletePermissionsForUser(user) +} + +// GetPermissionsForUser gets permissions for a user or role. +func (e *SyncedEnforcer) GetPermissionsForUser(user string, domain ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetPermissionsForUser(user, domain...) +} + +// GetNamedPermissionsForUser gets permissions for a user or role by named policy. +func (e *SyncedEnforcer) GetNamedPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedPermissionsForUser(ptype, user, domain...) +} + +// HasPermissionForUser determines whether a user has a permission. 
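+//
+// Usage sketch (illustrative): ok, err := e.HasPermissionForUser("alice", "data1", "read")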
+func (e *SyncedEnforcer) HasPermissionForUser(user string, permission ...string) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasPermissionForUser(user, permission...) +} + +// GetImplicitRolesForUser gets implicit roles that a user has. +// Compared to GetRolesForUser(), this function retrieves indirect roles besides direct roles. +// For example: +// g, alice, role:admin +// g, role:admin, role:user +// +// GetRolesForUser("alice") can only get: ["role:admin"]. +// But GetImplicitRolesForUser("alice") will get: ["role:admin", "role:user"]. +func (e *SyncedEnforcer) GetImplicitRolesForUser(name string, domain ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetImplicitRolesForUser(name, domain...) +} + +// GetImplicitPermissionsForUser gets implicit permissions for a user or role. +// Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p, alice, data2, read +// g, alice, admin +// +// GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]]. +// But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]]. +func (e *SyncedEnforcer) GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.GetImplicitPermissionsForUser(user, domain...) +} + +// GetNamedImplicitPermissionsForUser gets implicit permissions for a user or role by named policy. +// Compared to GetNamedPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p2, admin, create +// g, alice, admin +// +// GetImplicitPermissionsForUser("alice") can only get: [["admin", "data1", "read"]], whose policy is default policy "p" +// But you can specify the named policy "p2" to get: [["admin", "create"]] by GetNamedImplicitPermissionsForUser("p2","alice"). +func (e *SyncedEnforcer) GetNamedImplicitPermissionsForUser(ptype string, gtype string, user string, domain ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedImplicitPermissionsForUser(ptype, gtype, user, domain...) +} + +// GetImplicitUsersForPermission gets implicit users for a permission. +// For example: +// p, admin, data1, read +// p, bob, data1, read +// g, alice, admin +// +// GetImplicitUsersForPermission("data1", "read") will get: ["alice", "bob"]. +// Note: only users will be returned, roles (2nd arg in "g") will be excluded. +func (e *SyncedEnforcer) GetImplicitUsersForPermission(permission ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetImplicitUsersForPermission(permission...) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go new file mode 100644 index 0000000000..f6fc4a24ee --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go @@ -0,0 +1,192 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
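The implicit-role examples in the doc comments above translate directly into code; a small sketch against an in-memory enforcer (basic RBAC model, illustrative names):

```go
package main

import (
	"fmt"

	casbinv2 "github.com/casbin/casbin/v2"
	"github.com/casbin/casbin/v2/model"
)

func main() {
	m, _ := model.NewModelFromString(`
[request_definition]
r = sub, obj, act

[policy_definition]
p = sub, obj, act

[role_definition]
g = _, _

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = g(r.sub, p.sub) && r.obj == p.obj && r.act == p.act
`)
	e, _ := casbinv2.NewEnforcer(m)

	e.AddPolicy("role:admin", "data1", "read")
	e.AddPolicy("alice", "data2", "read")
	e.AddGroupingPolicy("alice", "role:admin")
	e.AddGroupingPolicy("role:admin", "role:user") // a role inheriting a role

	direct, _ := e.GetRolesForUser("alice")           // [role:admin]
	implicit, _ := e.GetImplicitRolesForUser("alice") // [role:admin role:user]
	perms, _ := e.GetImplicitPermissionsForUser("alice")
	// [[role:admin data1 read] [alice data2 read]]
	fmt.Println(direct, implicit, perms)
}
```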
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "fmt" + + "github.com/casbin/casbin/v2/constant" +) + +// GetUsersForRoleInDomain gets the users that has a role inside a domain. Add by Gordon. +func (e *Enforcer) GetUsersForRoleInDomain(name string, domain string) []string { + if e.GetRoleManager() == nil { + return nil + } + res, _ := e.GetRoleManager().GetUsers(name, domain) + return res +} + +// GetRolesForUserInDomain gets the roles that a user has inside a domain. +func (e *Enforcer) GetRolesForUserInDomain(name string, domain string) []string { + if e.GetRoleManager() == nil { + return nil + } + res, _ := e.GetRoleManager().GetRoles(name, domain) + return res +} + +// GetPermissionsForUserInDomain gets permissions for a user or role inside a domain. +func (e *Enforcer) GetPermissionsForUserInDomain(user string, domain string) [][]string { + res, _ := e.GetImplicitPermissionsForUser(user, domain) + return res +} + +// AddRoleForUserInDomain adds a role for a user inside a domain. +// Returns false if the user already has the role (aka not affected). +func (e *Enforcer) AddRoleForUserInDomain(user string, role string, domain string) (bool, error) { + return e.AddGroupingPolicy(user, role, domain) +} + +// DeleteRoleForUserInDomain deletes a role for a user inside a domain. +// Returns false if the user does not have the role (aka not affected). +func (e *Enforcer) DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) { + return e.RemoveGroupingPolicy(user, role, domain) +} + +// DeleteRolesForUserInDomain deletes all roles for a user inside a domain. +// Returns false if the user does not have any roles (aka not affected). +func (e *Enforcer) DeleteRolesForUserInDomain(user string, domain string) (bool, error) { + if e.GetRoleManager() == nil { + return false, fmt.Errorf("role manager is not initialized") + } + roles, err := e.GetRoleManager().GetRoles(user, domain) + if err != nil { + return false, err + } + + var rules [][]string + for _, role := range roles { + rules = append(rules, []string{user, role, domain}) + } + + return e.RemoveGroupingPolicies(rules) +} + +// GetAllUsersByDomain would get all users associated with the domain. +func (e *Enforcer) GetAllUsersByDomain(domain string) ([]string, error) { + m := make(map[string]struct{}) + g, err := e.model.GetAssertion("g", "g") + if err != nil { + return []string{}, err + } + p := e.model["p"]["p"] + users := make([]string, 0) + index, err := e.GetFieldIndex("p", constant.DomainIndex) + if err != nil { + return []string{}, err + } + + getUser := func(index int, policies [][]string, domain string, m map[string]struct{}) []string { + if len(policies) == 0 || len(policies[0]) <= index { + return []string{} + } + res := make([]string, 0) + for _, policy := range policies { + if _, ok := m[policy[0]]; policy[index] == domain && !ok { + res = append(res, policy[0]) + m[policy[0]] = struct{}{} + } + } + return res + } + + users = append(users, getUser(2, g.Policy, domain, m)...) + users = append(users, getUser(index, p.Policy, domain, m)...) 
+ return users, nil +} + +// DeleteAllUsersByDomain would delete all users associated with the domain. +func (e *Enforcer) DeleteAllUsersByDomain(domain string) (bool, error) { + g, err := e.model.GetAssertion("g", "g") + if err != nil { + return false, err + } + p := e.model["p"]["p"] + index, err := e.GetFieldIndex("p", constant.DomainIndex) + if err != nil { + return false, err + } + + getUser := func(index int, policies [][]string, domain string) [][]string { + if len(policies) == 0 || len(policies[0]) <= index { + return [][]string{} + } + res := make([][]string, 0) + for _, policy := range policies { + if policy[index] == domain { + res = append(res, policy) + } + } + return res + } + + users := getUser(2, g.Policy, domain) + if _, err = e.RemoveGroupingPolicies(users); err != nil { + return false, err + } + users = getUser(index, p.Policy, domain) + if _, err = e.RemovePolicies(users); err != nil { + return false, err + } + return true, nil +} + +// DeleteDomains would delete all associated users and roles. +// It would delete all domains if parameter is not provided. +func (e *Enforcer) DeleteDomains(domains ...string) (bool, error) { + if len(domains) == 0 { + e.ClearPolicy() + return true, nil + } + for _, domain := range domains { + if _, err := e.DeleteAllUsersByDomain(domain); err != nil { + return false, err + } + } + return true, nil +} + +// GetAllDomains would get all domains. +func (e *Enforcer) GetAllDomains() ([]string, error) { + if e.GetRoleManager() == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + return e.GetRoleManager().GetAllDomains() +} + +// GetAllRolesByDomain would get all roles associated with the domain. +// note: Not applicable to Domains with inheritance relationship (implicit roles) +func (e *Enforcer) GetAllRolesByDomain(domain string) ([]string, error) { + g, err := e.model.GetAssertion("g", "g") + if err != nil { + return []string{}, err + } + policies := g.Policy + roles := make([]string, 0) + existMap := make(map[string]bool) // remove duplicates + + for _, policy := range policies { + if policy[len(policy)-1] == domain { + role := policy[len(policy)-2] + if _, ok := existMap[role]; !ok { + roles = append(roles, role) + existMap[role] = true + } + } + } + + return roles, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go new file mode 100644 index 0000000000..26f6ce4ba0 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go @@ -0,0 +1,60 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +// GetUsersForRoleInDomain gets the users that has a role inside a domain. Add by Gordon. 
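A short sketch of the domain-scoped helpers above, again with illustrative tenant and role names; note that `GetAllUsersByDomain` also reports role names that appear as policy subjects, which is exactly what its implementation scans for:

```go
package main

import (
	"fmt"

	casbinv2 "github.com/casbin/casbin/v2"
	"github.com/casbin/casbin/v2/model"
)

func main() {
	m, _ := model.NewModelFromString(`
[request_definition]
r = sub, dom, obj, act

[policy_definition]
p = sub, dom, obj, act

[role_definition]
g = _, _, _

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = g(r.sub, p.sub, r.dom) && r.dom == p.dom && r.obj == p.obj && r.act == p.act
`)
	e, _ := casbinv2.NewEnforcer(m)

	e.AddRoleForUserInDomain("alice", "admin", "tenant1")
	e.AddRoleForUserInDomain("bob", "viewer", "tenant2")
	e.AddPolicy("admin", "tenant1", "data1", "write")

	fmt.Println(e.GetRolesForUserInDomain("alice", "tenant1")) // [admin]
	fmt.Println(e.GetAllRolesByDomain("tenant1"))              // [admin] <nil>
	fmt.Println(e.GetAllUsersByDomain("tenant1"))              // [alice admin] <nil>

	// DeleteDomains with no arguments clears the whole policy, so scope
	// it explicitly in multi-tenant code.
	e.DeleteDomains("tenant2")
}
```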
+func (e *SyncedEnforcer) GetUsersForRoleInDomain(name string, domain string) []string { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetUsersForRoleInDomain(name, domain) +} + +// GetRolesForUserInDomain gets the roles that a user has inside a domain. +func (e *SyncedEnforcer) GetRolesForUserInDomain(name string, domain string) []string { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetRolesForUserInDomain(name, domain) +} + +// GetPermissionsForUserInDomain gets permissions for a user or role inside a domain. +func (e *SyncedEnforcer) GetPermissionsForUserInDomain(user string, domain string) [][]string { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetPermissionsForUserInDomain(user, domain) +} + +// AddRoleForUserInDomain adds a role for a user inside a domain. +// Returns false if the user already has the role (aka not affected). +func (e *SyncedEnforcer) AddRoleForUserInDomain(user string, role string, domain string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddRoleForUserInDomain(user, role, domain) +} + +// DeleteRoleForUserInDomain deletes a role for a user inside a domain. +// Returns false if the user does not have the role (aka not affected). +func (e *SyncedEnforcer) DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRoleForUserInDomain(user, role, domain) +} + +// DeleteRolesForUserInDomain deletes all roles for a user inside a domain. +// Returns false if the user does not have any roles (aka not affected). +func (e *SyncedEnforcer) DeleteRolesForUserInDomain(user string, domain string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRolesForUserInDomain(user, domain) +} diff --git a/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go new file mode 100644 index 0000000000..bab9b0b3cb --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go @@ -0,0 +1,482 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "errors" + "fmt" + "net" + "path" + "regexp" + "strings" + "sync" + "time" + + "github.com/casbin/casbin/v2/rbac" + + "github.com/casbin/govaluate" +) + +var ( + keyMatch4Re *regexp.Regexp = regexp.MustCompile(`{([^/]+)}`) +) + +// validate the variadic parameter size and type as string. +func validateVariadicArgs(expectedLen int, args ...interface{}) error { + if len(args) != expectedLen { + return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) + } + + for _, p := range args { + _, ok := p.(string) + if !ok { + return errors.New("argument must be a string") + } + } + + return nil +} + +// validate the variadic string parameter size. 
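These wrappers exist so that policy reads and writes can interleave safely from multiple goroutines; roughly, usage looks like the following sketch (model text and names illustrative):

```go
package main

import (
	"fmt"
	"sync"

	casbinv2 "github.com/casbin/casbin/v2"
	"github.com/casbin/casbin/v2/model"
)

func main() {
	m, _ := model.NewModelFromString(`
[request_definition]
r = sub, dom, obj, act

[policy_definition]
p = sub, dom, obj, act

[role_definition]
g = _, _, _

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = g(r.sub, p.sub, r.dom) && r.dom == p.dom && r.obj == p.obj && r.act == p.act
`)
	e, _ := casbinv2.NewSyncedEnforcer(m)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			user := fmt.Sprintf("user%d", n)
			// Mutations take the write lock...
			e.AddRoleForUserInDomain(user, "viewer", "tenant1")
			// ...lookups take the read lock, so readers never observe a
			// half-applied policy change.
			e.GetRolesForUserInDomain(user, "tenant1")
		}(i)
	}
	wg.Wait()
}
```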
+func validateVariadicStringArgs(expectedLen int, args ...string) error { + if len(args) != expectedLen { + return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) + } + return nil +} + +// KeyMatch determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// For example, "/foo/bar" matches "/foo/*". +func KeyMatch(key1 string, key2 string) bool { + i := strings.Index(key2, "*") + if i == -1 { + return key1 == key2 + } + + if len(key1) > i { + return key1[:i] == key2[:i] + } + return key1 == key2[:i] +} + +// KeyMatchFunc is the wrapper for KeyMatch. +func KeyMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch(name1, name2), nil +} + +// KeyGet returns the matched part +// For example, "/foo/bar/foo" matches "/foo/*" +// "bar/foo" will been returned. +func KeyGet(key1, key2 string) string { + i := strings.Index(key2, "*") + if i == -1 { + return "" + } + if len(key1) > i { + if key1[:i] == key2[:i] { + return key1[i:] + } + } + return "" +} + +// KeyGetFunc is the wrapper for KeyGet. +func KeyGetFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyGet", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyGet(name1, name2), nil +} + +// KeyMatch2 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/:resource". +func KeyMatch2(key1 string, key2 string) bool { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + re := regexp.MustCompile(`:[^/]+`) + key2 = re.ReplaceAllString(key2, "$1[^/]+$2") + + return RegexMatch(key1, "^"+key2+"$") +} + +// KeyMatch2Func is the wrapper for KeyMatch2. +func KeyMatch2Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch2", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch2(name1, name2), nil +} + +// KeyGet2 returns value matched pattern +// For example, "/resource1" matches "/:resource" +// if the pathVar == "resource", then "resource1" will be returned. +func KeyGet2(key1, key2 string, pathVar string) string { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + re := regexp.MustCompile(`:[^/]+`) + keys := re.FindAllString(key2, -1) + key2 = re.ReplaceAllString(key2, "$1([^/]+)$2") + key2 = "^" + key2 + "$" + re2 := regexp.MustCompile(key2) + values := re2.FindAllStringSubmatch(key1, -1) + if len(values) == 0 { + return "" + } + for i, key := range keys { + if pathVar == key[1:] { + return values[0][i+1] + } + } + return "" +} + +// KeyGet2Func is the wrapper for KeyGet2. +func KeyGet2Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(3, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyGet2", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + key := args[2].(string) + + return KeyGet2(name1, name2, key), nil +} + +// KeyMatch3 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/{resource}". 
+func KeyMatch3(key1 string, key2 string) bool { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + re := regexp.MustCompile(`\{[^/]+\}`) + key2 = re.ReplaceAllString(key2, "$1[^/]+$2") + + return RegexMatch(key1, "^"+key2+"$") +} + +// KeyMatch3Func is the wrapper for KeyMatch3. +func KeyMatch3Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch3", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch3(name1, name2), nil +} + +// KeyGet3 returns value matched pattern +// For example, "project/proj_project1_admin/" matches "project/proj_{project}_admin/" +// if the pathVar == "project", then "project1" will be returned. +func KeyGet3(key1, key2 string, pathVar string) string { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + re := regexp.MustCompile(`\{[^/]+?\}`) // non-greedy match of `{...}` to support multiple {} in `/.../` + keys := re.FindAllString(key2, -1) + key2 = re.ReplaceAllString(key2, "$1([^/]+?)$2") + key2 = "^" + key2 + "$" + re2 := regexp.MustCompile(key2) + values := re2.FindAllStringSubmatch(key1, -1) + if len(values) == 0 { + return "" + } + for i, key := range keys { + if pathVar == key[1:len(key)-1] { + return values[0][i+1] + } + } + return "" +} + +// KeyGet3Func is the wrapper for KeyGet3. +func KeyGet3Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(3, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyGet3", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + key := args[2].(string) + + return KeyGet3(name1, name2, key), nil +} + +// KeyMatch4 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// Besides what KeyMatch3 does, KeyMatch4 can also match repeated patterns: +// "/parent/123/child/123" matches "/parent/{id}/child/{id}" +// "/parent/123/child/456" does not match "/parent/{id}/child/{id}" +// But KeyMatch3 will match both. +func KeyMatch4(key1 string, key2 string) bool { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + tokens := []string{} + + re := keyMatch4Re + key2 = re.ReplaceAllStringFunc(key2, func(s string) string { + tokens = append(tokens, s[1:len(s)-1]) + return "([^/]+)" + }) + + re = regexp.MustCompile("^" + key2 + "$") + matches := re.FindStringSubmatch(key1) + if matches == nil { + return false + } + matches = matches[1:] + + if len(tokens) != len(matches) { + panic(errors.New("KeyMatch4: number of tokens is not equal to number of values")) + } + + values := map[string]string{} + + for key, token := range tokens { + if _, ok := values[token]; !ok { + values[token] = matches[key] + } + if values[token] != matches[key] { + return false + } + } + + return true +} + +// KeyMatch4Func is the wrapper for KeyMatch4. +func KeyMatch4Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch4", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch4(name1, name2), nil +} + +// KeyMatch5 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a * +// For example, +// - "/foo/bar?status=1&type=2" matches "/foo/bar" +// - "/parent/child1" and "/parent/child1" matches "/parent/*" +// - "/parent/child1?status=1" matches "/parent/*". 
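The behavioral differences among the KeyMatch variants above are easiest to see side by side; expected results are noted inline:

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin/v2/util"
)

func main() {
	fmt.Println(util.KeyMatch("/foo/bar", "/foo/*"))                  // true
	fmt.Println(util.KeyMatch2("/resource1", "/:resource"))           // true
	fmt.Println(util.KeyGet2("/resource1", "/:resource", "resource")) // resource1

	fmt.Println(util.KeyMatch3("/resource1", "/{resource}")) // true
	fmt.Println(util.KeyGet3("project/proj_p1_admin/",
		"project/proj_{project}_admin/", "project")) // p1

	// KeyMatch4 requires repeated placeholders to bind the same value;
	// KeyMatch3 would accept both of these paths.
	fmt.Println(util.KeyMatch4("/parent/123/child/123", "/parent/{id}/child/{id}")) // true
	fmt.Println(util.KeyMatch4("/parent/123/child/456", "/parent/{id}/child/{id}")) // false
}
```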
+func KeyMatch5(key1 string, key2 string) bool { + i := strings.Index(key1, "?") + + if i != -1 { + key1 = key1[:i] + } + + key2 = strings.Replace(key2, "/*", "/.*", -1) + + re := regexp.MustCompile(`\{[^/]+\}`) + key2 = re.ReplaceAllString(key2, "$1[^/]+$2") + + return RegexMatch(key1, "^"+key2+"$") +} + +// KeyMatch5Func is the wrapper for KeyMatch5. +func KeyMatch5Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch5", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch5(name1, name2), nil +} + +// RegexMatch determines whether key1 matches the pattern of key2 in regular expression. +func RegexMatch(key1 string, key2 string) bool { + res, err := regexp.MatchString(key2, key1) + if err != nil { + panic(err) + } + return res +} + +// RegexMatchFunc is the wrapper for RegexMatch. +func RegexMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "regexMatch", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return RegexMatch(name1, name2), nil +} + +// IPMatch determines whether IP address ip1 matches the pattern of IP address ip2, ip2 can be an IP address or a CIDR pattern. +// For example, "192.168.2.123" matches "192.168.2.0/24". +func IPMatch(ip1 string, ip2 string) bool { + objIP1 := net.ParseIP(ip1) + if objIP1 == nil { + panic("invalid argument: ip1 in IPMatch() function is not an IP address.") + } + + _, cidr, err := net.ParseCIDR(ip2) + if err != nil { + objIP2 := net.ParseIP(ip2) + if objIP2 == nil { + panic("invalid argument: ip2 in IPMatch() function is neither an IP address nor a CIDR.") + } + + return objIP1.Equal(objIP2) + } + + return cidr.Contains(objIP1) +} + +// IPMatchFunc is the wrapper for IPMatch. +func IPMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "ipMatch", err) + } + + ip1 := args[0].(string) + ip2 := args[1].(string) + + return IPMatch(ip1, ip2), nil +} + +// GlobMatch determines whether key1 matches the pattern of key2 using glob pattern. +func GlobMatch(key1 string, key2 string) (bool, error) { + return path.Match(key2, key1) +} + +// GlobMatchFunc is the wrapper for GlobMatch. +func GlobMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "globMatch", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return GlobMatch(name1, name2) +} + +// GenerateGFunction is the factory method of the g(_, _[, _]) function. +func GenerateGFunction(rm rbac.RoleManager) govaluate.ExpressionFunction { + memorized := sync.Map{} + return func(args ...interface{}) (interface{}, error) { + // Like all our other govaluate functions, all args are strings. + + // Allocate and generate a cache key from the arguments... + total := len(args) + for _, a := range args { + aStr := a.(string) + total += len(aStr) + } + builder := strings.Builder{} + builder.Grow(total) + for _, arg := range args { + builder.WriteByte(0) + builder.WriteString(arg.(string)) + } + key := builder.String() + + // ...and see if we've already calculated this. + v, found := memorized.Load(key) + if found { + return v, nil + } + + // If not, do the calculation. + // There are guaranteed to be exactly 2 or 3 arguments. 
+ name1, name2 := args[0].(string), args[1].(string) + if rm == nil { + v = name1 == name2 + } else if len(args) == 2 { + v, _ = rm.HasLink(name1, name2) + } else { + domain := args[2].(string) + v, _ = rm.HasLink(name1, name2, domain) + } + + memorized.Store(key, v) + return v, nil + } +} + +// GenerateConditionalGFunction is the factory method of the g(_, _[, _]) function with conditions. +func GenerateConditionalGFunction(crm rbac.ConditionalRoleManager) govaluate.ExpressionFunction { + return func(args ...interface{}) (interface{}, error) { + // Like all our other govaluate functions, all args are strings. + var hasLink bool + + name1, name2 := args[0].(string), args[1].(string) + if crm == nil { + hasLink = name1 == name2 + } else if len(args) == 2 { + hasLink, _ = crm.HasLink(name1, name2) + } else { + domain := args[2].(string) + hasLink, _ = crm.HasLink(name1, name2, domain) + } + + return hasLink, nil + } +} + +// builtin LinkConditionFunc + +// TimeMatchFunc is the wrapper for TimeMatch. +func TimeMatchFunc(args ...string) (bool, error) { + if err := validateVariadicStringArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "TimeMatch", err) + } + return TimeMatch(args[0], args[1]) +} + +// TimeMatch determines whether the current time is between startTime and endTime. +// You can use "_" to indicate that the parameter is ignored. +func TimeMatch(startTime, endTime string) (bool, error) { + now := time.Now() + if startTime != "_" { + if start, err := time.Parse("2006-01-02 15:04:05", startTime); err != nil { + return false, err + } else if !now.After(start) { + return false, nil + } + } + + if endTime != "_" { + if end, err := time.Parse("2006-01-02 15:04:05", endTime); err != nil { + return false, err + } else if !now.Before(end) { + return false, nil + } + } + + return true, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/util/util.go b/vendor/github.com/casbin/casbin/v2/util/util.go new file mode 100644 index 0000000000..f247b27b4a --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/util/util.go @@ -0,0 +1,383 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "encoding/json" + "regexp" + "sort" + "strings" + "sync" +) + +var evalReg = regexp.MustCompile(`\beval\((?P<rule>[^)]*)\)`) + +var escapeAssertionRegex = regexp.MustCompile(`\b((r|p)[0-9]*)\.`) + +func JsonToMap(jsonStr string) (map[string]interface{}, error) { + result := make(map[string]interface{}) + err := json.Unmarshal([]byte(jsonStr), &result) + if err != nil { + return result, err + } + return result, nil +} + +// EscapeAssertion escapes the dots in the assertion, because the expression evaluation doesn't support such variable names. +func EscapeAssertion(s string) string { + s = escapeAssertionRegex.ReplaceAllStringFunc(s, func(m string) string { + return strings.Replace(m, ".", "_", 1) + }) + return s +} + +// RemoveComments removes the comments starting with # in the text.
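A quick sketch of the two helpers above that are easy to misuse, TimeMatch's fixed layout and EscapeAssertion's rewriting:

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin/v2/util"
)

func main() {
	// "_" skips a bound, so this checks only the end time. Both bounds
	// must use the fixed layout "2006-01-02 15:04:05"; anything else is
	// a parse error.
	ok, err := util.TimeMatch("_", "2030-01-01 00:00:00")
	fmt.Println(ok, err) // true <nil> (until that date passes)

	ok, err = util.TimeMatch("2020-01-01 00:00:00", "2021-01-01 00:00:00")
	fmt.Println(ok, err) // false <nil> (the window has closed)

	// EscapeAssertion rewrites "r.sub" style references into identifiers
	// the expression evaluator can handle.
	fmt.Println(util.EscapeAssertion("r.sub == p.sub && r.obj == p.obj"))
	// Output: r_sub == p_sub && r_obj == p_obj
}
```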
+func RemoveComments(s string) string { + pos := strings.Index(s, "#") + if pos == -1 { + return s + } + return strings.TrimSpace(s[0:pos]) +} + +// ArrayEquals determines whether two string arrays are identical. +func ArrayEquals(a []string, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// Array2DEquals determines whether two 2-dimensional string arrays are identical. +func Array2DEquals(a [][]string, b [][]string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if !ArrayEquals(v, b[i]) { + return false + } + } + return true +} + +// SortArray2D Sorts the two-dimensional string array. +func SortArray2D(arr [][]string) { + if len(arr) != 0 { + sort.Slice(arr, func(i, j int) bool { + elementLen := len(arr[0]) + for k := 0; k < elementLen; k++ { + if arr[i][k] < arr[j][k] { + return true + } else if arr[i][k] > arr[j][k] { + return false + } + } + return true + }) + } +} + +// SortedArray2DEquals determines whether two 2-dimensional string arrays are identical. +func SortedArray2DEquals(a [][]string, b [][]string) bool { + if len(a) != len(b) { + return false + } + copyA := make([][]string, len(a)) + copy(copyA, a) + copyB := make([][]string, len(b)) + copy(copyB, b) + + SortArray2D(copyA) + SortArray2D(copyB) + + for i, v := range copyA { + if !ArrayEquals(v, copyB[i]) { + return false + } + } + return true +} + +// ArrayRemoveDuplicates removes any duplicated elements in a string array. +func ArrayRemoveDuplicates(s *[]string) { + found := make(map[string]bool) + j := 0 + for i, x := range *s { + if !found[x] { + found[x] = true + (*s)[j] = (*s)[i] + j++ + } + } + *s = (*s)[:j] +} + +// ArrayToString gets a printable string for a string array. +func ArrayToString(s []string) string { + return strings.Join(s, ", ") +} + +// ParamsToString gets a printable string for variable number of parameters. +func ParamsToString(s ...string) string { + return strings.Join(s, ", ") +} + +// SetEquals determines whether two string sets are identical. +func SetEquals(a []string, b []string) bool { + if len(a) != len(b) { + return false + } + + sort.Strings(a) + sort.Strings(b) + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// SetEquals determines whether two int sets are identical. +func SetEqualsInt(a []int, b []int) bool { + if len(a) != len(b) { + return false + } + + sort.Ints(a) + sort.Ints(b) + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// Set2DEquals determines whether two string slice sets are identical. +func Set2DEquals(a [][]string, b [][]string) bool { + if len(a) != len(b) { + return false + } + + var aa []string + for _, v := range a { + sort.Strings(v) + aa = append(aa, strings.Join(v, ", ")) + } + var bb []string + for _, v := range b { + sort.Strings(v) + bb = append(bb, strings.Join(v, ", ")) + } + + return SetEquals(aa, bb) +} + +// JoinSlice joins a string and a slice into a new slice. +func JoinSlice(a string, b ...string) []string { + res := make([]string, 0, len(b)+1) + + res = append(res, a) + res = append(res, b...) + + return res +} + +// JoinSliceAny joins a string and a slice into a new interface{} slice. +func JoinSliceAny(a string, b ...string) []interface{} { + res := make([]interface{}, 0, len(b)+1) + + res = append(res, a) + for _, s := range b { + res = append(res, s) + } + + return res +} + +// SetSubtract returns the elements in `a` that aren't in `b`. 
+func SetSubtract(a []string, b []string) []string { + mb := make(map[string]struct{}, len(b)) + for _, x := range b { + mb[x] = struct{}{} + } + var diff []string + for _, x := range a { + if _, found := mb[x]; !found { + diff = append(diff, x) + } + } + return diff +} + +// HasEval determine whether matcher contains function eval. +func HasEval(s string) bool { + return evalReg.MatchString(s) +} + +// ReplaceEval replace function eval with the value of its parameters. +func ReplaceEval(s string, rule string) string { + return evalReg.ReplaceAllString(s, "("+rule+")") +} + +// ReplaceEvalWithMap replace function eval with the value of its parameters via given sets. +func ReplaceEvalWithMap(src string, sets map[string]string) string { + return evalReg.ReplaceAllStringFunc(src, func(s string) string { + subs := evalReg.FindStringSubmatch(s) + if subs == nil { + return s + } + key := subs[1] + value, found := sets[key] + if !found { + return s + } + return evalReg.ReplaceAllString(s, value) + }) +} + +// GetEvalValue returns the parameters of function eval. +func GetEvalValue(s string) []string { + subMatch := evalReg.FindAllStringSubmatch(s, -1) + var rules []string + for _, rule := range subMatch { + rules = append(rules, rule[1]) + } + return rules +} + +func RemoveDuplicateElement(s []string) []string { + result := make([]string, 0, len(s)) + temp := map[string]struct{}{} + for _, item := range s { + if _, ok := temp[item]; !ok { + temp[item] = struct{}{} + result = append(result, item) + } + } + return result +} + +type node struct { + key interface{} + value interface{} + prev *node + next *node +} + +type LRUCache struct { + capacity int + m map[interface{}]*node + head *node + tail *node +} + +func NewLRUCache(capacity int) *LRUCache { + cache := &LRUCache{} + cache.capacity = capacity + cache.m = map[interface{}]*node{} + + head := &node{} + tail := &node{} + + head.next = tail + tail.prev = head + + cache.head = head + cache.tail = tail + + return cache +} + +func (cache *LRUCache) remove(n *node, listOnly bool) { + if !listOnly { + delete(cache.m, n.key) + } + n.prev.next = n.next + n.next.prev = n.prev +} + +func (cache *LRUCache) add(n *node, listOnly bool) { + if !listOnly { + cache.m[n.key] = n + } + headNext := cache.head.next + cache.head.next = n + headNext.prev = n + n.next = headNext + n.prev = cache.head +} + +func (cache *LRUCache) moveToHead(n *node) { + cache.remove(n, true) + cache.add(n, true) +} + +func (cache *LRUCache) Get(key interface{}) (value interface{}, ok bool) { + n, ok := cache.m[key] + if ok { + cache.moveToHead(n) + return n.value, ok + } else { + return nil, ok + } +} + +func (cache *LRUCache) Put(key interface{}, value interface{}) { + n, ok := cache.m[key] + if ok { + cache.remove(n, false) + } else { + n = &node{key, value, nil, nil} + if len(cache.m) >= cache.capacity { + cache.remove(cache.tail.prev, false) + } + } + cache.add(n, false) +} + +type SyncLRUCache struct { + rwm sync.RWMutex + *LRUCache +} + +func NewSyncLRUCache(capacity int) *SyncLRUCache { + cache := &SyncLRUCache{} + cache.LRUCache = NewLRUCache(capacity) + return cache +} + +func (cache *SyncLRUCache) Get(key interface{}) (value interface{}, ok bool) { + cache.rwm.Lock() + defer cache.rwm.Unlock() + return cache.LRUCache.Get(key) +} + +func (cache *SyncLRUCache) Put(key interface{}, value interface{}) { + cache.rwm.Lock() + defer cache.rwm.Unlock() + cache.LRUCache.Put(key, value) +} diff --git a/vendor/github.com/casbin/govaluate/.gitignore 
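The eval-rewriting helpers and the LRU cache above can be exercised directly; a small sketch (the matcher string is illustrative, in the style of casbin's ABAC rule models):

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin/v2/util"
)

func main() {
	// eval() rewriting, as used for models whose matcher embeds
	// per-policy expressions such as "eval(p.sub_rule)".
	m := "g(r.sub, p.sub) && eval(p.sub_rule)"
	fmt.Println(util.HasEval(m))      // true
	fmt.Println(util.GetEvalValue(m)) // [p.sub_rule]
	fmt.Println(util.ReplaceEval(m, "r.sub.Age > 18"))
	// g(r.sub, p.sub) && (r.sub.Age > 18)

	// The synchronized LRU cache evicts the least recently used entry
	// once capacity is exceeded.
	cache := util.NewSyncLRUCache(2)
	cache.Put("a", 1)
	cache.Put("b", 2)
	cache.Get("a")    // "a" is now most recently used
	cache.Put("c", 3) // evicts "b"
	_, ok := cache.Get("b")
	fmt.Println(ok) // false
}
```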
b/vendor/github.com/casbin/govaluate/.gitignore new file mode 100644 index 0000000000..da210fb317 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +coverage.out + +manual_test.go +*.out +*.err diff --git a/vendor/github.com/casbin/govaluate/.releaserc.json b/vendor/github.com/casbin/govaluate/.releaserc.json new file mode 100644 index 0000000000..58cb0bb4ca --- /dev/null +++ b/vendor/github.com/casbin/govaluate/.releaserc.json @@ -0,0 +1,16 @@ +{ + "debug": true, + "branches": [ + "+([0-9])?(.{+([0-9]),x}).x", + "master", + { + "name": "beta", + "prerelease": true + } + ], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/github" + ] +} diff --git a/vendor/github.com/casbin/govaluate/CONTRIBUTORS b/vendor/github.com/casbin/govaluate/CONTRIBUTORS new file mode 100644 index 0000000000..c1a7fe42d7 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/CONTRIBUTORS @@ -0,0 +1,15 @@ +This library was authored by George Lester, and contains contributions from: + +vjeantet (regex support) +iasci (ternary operator) +oxtoacart (parameter structures, deferred parameter retrieval) +wmiller848 (bitwise operators) +prashantv (optimization of bools) +dpaolella (exposure of variables used in an expression) +benpaxton (fix for missing type checks during literal elide process) +abrander (panic-finding testing tool, float32 conversions) +xfennec (fix for dates being parsed in the current Location) +bgaifullin (lifting restriction on complex/struct types) +gautambt (hexadecimal literals) +felixonmars (fix multiple typos in test names) +sambonfire (automatic type conversion for accessor function calls) \ No newline at end of file diff --git a/vendor/github.com/casbin/govaluate/EvaluableExpression.go b/vendor/github.com/casbin/govaluate/EvaluableExpression.go new file mode 100644 index 0000000000..a5fe50d475 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/EvaluableExpression.go @@ -0,0 +1,276 @@ +package govaluate + +import ( + "errors" + "fmt" +) + +const isoDateFormat string = "2006-01-02T15:04:05.999999999Z0700" +const shortCircuitHolder int = -1 + +var DUMMY_PARAMETERS = MapParameters(map[string]interface{}{}) + +/* + EvaluableExpression represents a set of ExpressionTokens which, taken together, + are an expression that can be evaluated down into a single value. +*/ +type EvaluableExpression struct { + + /* + Represents the query format used to output dates. Typically only used when creating SQL or Mongo queries from an expression. + Defaults to the complete ISO8601 format, including nanoseconds. + */ + QueryDateFormat string + + /* + Whether or not to safely check types when evaluating. + If true, this library will return error messages when invalid types are used. + If false, the library will panic when operators encounter types they can't use. + + This is exclusively for users who need to squeeze every ounce of speed out of the library as they can, + and you should only set this to false if you know exactly what you're doing. 
+ */ + ChecksTypes bool + + tokens []ExpressionToken + evaluationStages *evaluationStage + inputExpression string +} + +/* + Parses a new EvaluableExpression from the given [expression] string. + Returns an error if the given expression has invalid syntax. +*/ +func NewEvaluableExpression(expression string) (*EvaluableExpression, error) { + + functions := make(map[string]ExpressionFunction) + return NewEvaluableExpressionWithFunctions(expression, functions) +} + +/* + Similar to [NewEvaluableExpression], except that instead of a string, an already-tokenized expression is given. + This is useful in cases where you may be generating an expression automatically, or using some other parser (e.g., to parse from a query language) +*/ +func NewEvaluableExpressionFromTokens(tokens []ExpressionToken) (*EvaluableExpression, error) { + + var ret *EvaluableExpression + var err error + + ret = new(EvaluableExpression) + ret.QueryDateFormat = isoDateFormat + + err = checkBalance(tokens) + if err != nil { + return nil, err + } + + err = checkExpressionSyntax(tokens) + if err != nil { + return nil, err + } + + ret.tokens, err = optimizeTokens(tokens) + if err != nil { + return nil, err + } + + ret.evaluationStages, err = planStages(ret.tokens) + if err != nil { + return nil, err + } + + ret.ChecksTypes = true + return ret, nil +} + +/* + Similar to [NewEvaluableExpression], except enables the use of user-defined functions. + Functions passed into this will be available to the expression. +*/ +func NewEvaluableExpressionWithFunctions(expression string, functions map[string]ExpressionFunction) (*EvaluableExpression, error) { + + var ret *EvaluableExpression + var err error + + ret = new(EvaluableExpression) + ret.QueryDateFormat = isoDateFormat + ret.inputExpression = expression + + ret.tokens, err = parseTokens(expression, functions) + if err != nil { + return nil, err + } + + err = checkBalance(ret.tokens) + if err != nil { + return nil, err + } + + err = checkExpressionSyntax(ret.tokens) + if err != nil { + return nil, err + } + + ret.tokens, err = optimizeTokens(ret.tokens) + if err != nil { + return nil, err + } + + ret.evaluationStages, err = planStages(ret.tokens) + if err != nil { + return nil, err + } + + ret.ChecksTypes = true + return ret, nil +} + +/* + Same as `Eval`, but automatically wraps a map of parameters into a `govalute.Parameters` structure. +*/ +func (this EvaluableExpression) Evaluate(parameters map[string]interface{}) (interface{}, error) { + + if parameters == nil { + return this.Eval(nil) + } + + return this.Eval(MapParameters(parameters)) +} + +/* + Runs the entire expression using the given [parameters]. + e.g., If the expression contains a reference to the variable "foo", it will be taken from `parameters.Get("foo")`. + + This function returns errors if the combination of expression and parameters cannot be run, + such as if a variable in the expression is not present in [parameters]. + + In all non-error circumstances, this returns the single value result of the expression and parameters given. + e.g., if the expression is "1 + 1", this will return 2.0. 
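Putting the constructors above together, a minimal govaluate round trip with a custom function; the function and parameter names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/casbin/govaluate"
)

func main() {
	funcs := map[string]govaluate.ExpressionFunction{
		// Custom functions receive already-evaluated arguments.
		"strlen": func(args ...interface{}) (interface{}, error) {
			return float64(len(args[0].(string))), nil
		},
	}

	expr, err := govaluate.NewEvaluableExpressionWithFunctions("strlen(name) > 5", funcs)
	if err != nil {
		panic(err) // syntax errors surface at parse time, not evaluation
	}

	// Evaluate wraps the map in a MapParameters for you; numeric results
	// always come back as float64, booleans as bool.
	result, err := expr.Evaluate(map[string]interface{}{"name": "devtron"})
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // true
}
```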
+ e.g., if the expression is "foo + 1" and parameters contains "foo" = 2, this will return 3.0 +*/ +func (this EvaluableExpression) Eval(parameters Parameters) (interface{}, error) { + + if this.evaluationStages == nil { + return nil, nil + } + + if parameters != nil { + parameters = &sanitizedParameters{parameters} + } else { + parameters = DUMMY_PARAMETERS + } + + return this.evaluateStage(this.evaluationStages, parameters) +} + +func (this EvaluableExpression) evaluateStage(stage *evaluationStage, parameters Parameters) (interface{}, error) { + + var left, right interface{} + var err error + + if stage.leftStage != nil { + left, err = this.evaluateStage(stage.leftStage, parameters) + if err != nil { + return nil, err + } + } + + if stage.isShortCircuitable() { + switch stage.symbol { + case AND: + if left == false { + return false, nil + } + case OR: + if left == true { + return true, nil + } + case COALESCE: + if left != nil { + return left, nil + } + + case TERNARY_TRUE: + if left == false { + right = shortCircuitHolder + } + case TERNARY_FALSE: + if left != nil { + right = shortCircuitHolder + } + } + } + + if right != shortCircuitHolder && stage.rightStage != nil { + right, err = this.evaluateStage(stage.rightStage, parameters) + if err != nil { + return nil, err + } + } + + if this.ChecksTypes { + if stage.typeCheck == nil { + + err = typeCheck(stage.leftTypeCheck, left, stage.symbol, stage.typeErrorFormat) + if err != nil { + return nil, err + } + + err = typeCheck(stage.rightTypeCheck, right, stage.symbol, stage.typeErrorFormat) + if err != nil { + return nil, err + } + } else { + // special case where the type check needs to know both sides to determine if the operator can handle it + if !stage.typeCheck(left, right) { + errorMsg := fmt.Sprintf(stage.typeErrorFormat, left, stage.symbol.String()) + return nil, errors.New(errorMsg) + } + } + } + + return stage.operator(left, right, parameters) +} + +func typeCheck(check stageTypeCheck, value interface{}, symbol OperatorSymbol, format string) error { + + if check == nil { + return nil + } + + if check(value) { + return nil + } + + errorMsg := fmt.Sprintf(format, value, symbol.String()) + return errors.New(errorMsg) +} + +/* + Returns an array representing the ExpressionTokens that make up this expression. +*/ +func (this EvaluableExpression) Tokens() []ExpressionToken { + + return this.tokens +} + +/* + Returns the original expression used to create this EvaluableExpression. +*/ +func (this EvaluableExpression) String() string { + + return this.inputExpression +} + +/* + Returns an array representing the variables contained in this EvaluableExpression. +*/ +func (this EvaluableExpression) Vars() []string { + var varlist []string + for _, val := range this.Tokens() { + if val.Kind == VARIABLE { + varlist = append(varlist, val.Value.(string)) + } + } + return varlist +} diff --git a/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go b/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go new file mode 100644 index 0000000000..52409fa240 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go @@ -0,0 +1,167 @@ +package govaluate + +import ( + "errors" + "fmt" + "regexp" + "time" +) + +/* +Returns a string representing this expression as if it were written in SQL. +This function assumes that all parameters exist within the same table, and that the table essentially represents +a serialized object of some sort (e.g., hibernate). 
+If your data model is more normalized, you may need to consider iterating through each actual token given by `Tokens()` +to create your query. + +Boolean values are considered to be "1" for true, "0" for false. + +Times are formatted according to this.QueryDateFormat. +*/ +func (this EvaluableExpression) ToSQLQuery() (string, error) { + + var stream *tokenStream + var transactions *expressionOutputStream + var transaction string + var err error + + stream = newTokenStream(this.tokens) + transactions = new(expressionOutputStream) + + for stream.hasNext() { + + transaction, err = this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + transactions.add(transaction) + } + + return transactions.createString(" "), nil +} + +func (this EvaluableExpression) findNextSQLString(stream *tokenStream, transactions *expressionOutputStream) (string, error) { + + var token ExpressionToken + var ret string + + token = stream.next() + + switch token.Kind { + + case STRING: + ret = fmt.Sprintf("'%v'", token.Value) + case PATTERN: + ret = fmt.Sprintf("'%s'", token.Value.(*regexp.Regexp).String()) + case TIME: + ret = fmt.Sprintf("'%s'", token.Value.(time.Time).Format(this.QueryDateFormat)) + + case LOGICALOP: + switch logicalSymbols[token.Value.(string)] { + + case AND: + ret = "AND" + case OR: + ret = "OR" + } + + case BOOLEAN: + if token.Value.(bool) { + ret = "1" + } else { + ret = "0" + } + + case VARIABLE: + ret = fmt.Sprintf("[%s]", token.Value.(string)) + + case NUMERIC: + ret = fmt.Sprintf("%g", token.Value.(float64)) + + case COMPARATOR: + switch comparatorSymbols[token.Value.(string)] { + + case EQ: + ret = "=" + case NEQ: + ret = "<>" + case REQ: + ret = "RLIKE" + case NREQ: + ret = "NOT RLIKE" + default: + ret = fmt.Sprintf("%s", token.Value) + } + + case TERNARY: + + switch ternarySymbols[token.Value.(string)] { + + case COALESCE: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("COALESCE(%v, %v)", left, right) + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + return "", errors.New("Ternary operators are unsupported in SQL output") + } + case PREFIX: + switch prefixSymbols[token.Value.(string)] { + + case INVERT: + ret = "NOT" + default: + + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("%s%s", token.Value.(string), right) + } + case MODIFIER: + + switch modifierSymbols[token.Value.(string)] { + + case EXPONENT: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("POW(%s, %s)", left, right) + case MODULUS: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("MOD(%s, %s)", left, right) + default: + ret = fmt.Sprintf("%s", token.Value) + } + case CLAUSE: + ret = "(" + case CLAUSE_CLOSE: + ret = ")" + case SEPARATOR: + ret = "," + + default: + errorMsg := fmt.Sprintf("Unrecognized query token '%s' of kind '%s'", token.Value, token.Kind) + return "", errors.New(errorMsg) + } + + return ret, nil +} diff --git a/vendor/github.com/casbin/govaluate/ExpressionToken.go b/vendor/github.com/casbin/govaluate/ExpressionToken.go new file mode 100644 index 0000000000..f849f3813b --- /dev/null +++ b/vendor/github.com/casbin/govaluate/ExpressionToken.go @@ -0,0 +1,9 @@ +package 
govaluate + +/* + Represents a single parsed token. +*/ +type ExpressionToken struct { + Kind TokenKind + Value interface{} +} diff --git a/vendor/github.com/casbin/govaluate/LICENSE b/vendor/github.com/casbin/govaluate/LICENSE new file mode 100644 index 0000000000..24b9b45919 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2016 George Lester + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/casbin/govaluate/MANUAL.md b/vendor/github.com/casbin/govaluate/MANUAL.md new file mode 100644 index 0000000000..e065828516 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/MANUAL.md @@ -0,0 +1,176 @@ +govaluate +==== + +This library contains quite a lot of functionality, this document is meant to be formal documentation on the operators and features of it. +Some of this documentation may duplicate what's in README.md, but should never conflict. + +# Types + +This library only officially deals with four types; `float64`, `bool`, `string`, and arrays. + +All numeric literals, with or without a radix, will be converted to `float64` for evaluation. For instance; in practice, there is no difference between the literals "1.0" and "1", they both end up as `float64`. This matters to users because if you intend to return numeric values from your expressions, then the returned value will be `float64`, not any other numeric type. + +Any string _literal_ (not parameter) which is interpretable as a date will be converted to a `float64` representation of that date's unix time. Any `time.Time` parameters will not be operable with these date literals; such parameters will need to use the `time.Time.Unix()` method to get a numeric representation. + +Arrays are untyped, and can be mixed-type. Internally they're all just `interface{}`. Only two operators can interact with arrays, `IN` and `,`. All other operators will refuse to operate on arrays. + +# Operators + +## Modifiers + +### Addition, concatenation `+` + +If either left or right sides of the `+` operator are a `string`, then this operator will perform string concatenation and return that result. If neither are string, then both must be numeric, and this will return a numeric result. + +Any other case is invalid. + +### Arithmetic `-` `*` `/` `**` `%` + +`**` refers to "take to the power of". For instance, `3 ** 4` == 81. 
+ +* _Left side_: numeric +* _Right side_: numeric +* _Returns_: numeric + +### Bitwise shifts, masks `>>` `<<` `|` `&` `^` + +All of these operators convert their `float64` left and right sides to `int64`, perform their operation, and then convert back. +Given how this library assumes numeric are represented (as `float64`), it is unlikely that this behavior will change, even though it may cause havoc with extremely large or small numbers. + +* _Left side_: numeric +* _Right side_: numeric +* _Returns_: numeric + +### Negation `-` + +Prefix only. This can never have a left-hand value. + +* _Right side_: numeric +* _Returns_: numeric + +### Inversion `!` + +Prefix only. This can never have a left-hand value. + +* _Right side_: bool +* _Returns_: bool + +### Bitwise NOT `~` + +Prefix only. This can never have a left-hand value. + +* _Right side_: numeric +* _Returns_: numeric + +## Logical Operators + +For all logical operators, this library will short-circuit the operation if the left-hand side is sufficient to determine what to do. For instance, `true || expensiveOperation()` will not actually call `expensiveOperation()`, since it knows the left-hand side is `true`. + +### Logical AND/OR `&&` `||` + +* _Left side_: bool +* _Right side_: bool +* _Returns_: bool + +### Ternary true `?` + +Checks if the left side is `true`. If so, returns the right side. If the left side is `false`, returns `nil`. +In practice, this is commonly used with the other ternary operator. + +* _Left side_: bool +* _Right side_: Any type. +* _Returns_: Right side or `nil` + +### Ternary false `:` + +Checks if the left side is `nil`. If so, returns the right side. If the left side is non-nil, returns the left side. +In practice, this is commonly used with the other ternary operator. + +* _Left side_: Any type. +* _Right side_: Any type. +* _Returns_: Right side or `nil` + +### Null coalescence `??` + +Similar to the C# operator. If the left value is non-nil, it returns that. If not, then the right-value is returned. + +* _Left side_: Any type. +* _Right side_: Any type. +* _Returns_: No specific type - whichever is passed to it. + +## Comparators + +### Numeric/lexicographic comparators `>` `<` `>=` `<=` + +If both sides are numeric, this returns the usual greater/lesser behavior that would be expected. +If both sides are string, this returns the lexicographic comparison of the strings. This uses Go's standard lexicographic compare. + +* _Accepts_: Left and right side must either be both string, or both numeric. +* _Returns_: bool + +### Regex comparators `=~` `!~` + +These use go's standard `regexp` flavor of regex. The left side is expected to be the candidate string, the right side is the pattern. `=~` returns whether or not the candidate string matches the regex pattern given on the right. `!~` is the inverted version of the same logic. + +* _Left side_: string +* _Right side_: string +* _Returns_: bool + +## Arrays + +### Separator `,` + +The separator, always paired with parenthesis, creates arrays. It must always have both a left and right-hand value, so for instance `(, 0)` and `(0,)` are invalid uses of it. + +Again, this should always be used with parenthesis; like `(1, 2, 3, 4)`. + +### Membership `IN` + +The only operator with a text name, this operator checks the right-hand side array to see if it contains a value that is equal to the left-side value. +Equality is determined by the use of the `==` operator, and this library doesn't check types between the values. 
Any two values, when cast to `interface{}`, and can still be checked for equality with `==` will act as expected. + +Note that you can use a parameter for the array, but it must be an `[]interface{}`. + +* _Left side_: Any type. +* _Right side_: array +* _Returns_: bool + +# Parameters + +Parameters must be passed in every time the expression is evaluated. Parameters can be of any type, but will not cause errors unless actually used in an erroneous way. There is no difference in behavior for any of the above operators for parameters - they are type checked when used. + +All `int` and `float` values of any width will be converted to `float64` before use. + +At no point is the parameter structure, or any value thereof, modified by this library. + +## Alternates to maps + +The default form of parameters as a map may not serve your use case. You may have parameters in some other structure, you may want to change the no-parameter-found behavior, or maybe even just have some debugging print statements invoked when a parameter is accessed. + +To do this, define a type that implements the `govaluate.Parameters` interface. When you want to evaluate, instead call `EvaluableExpression.Eval` and pass your parameter structure. + +# Functions + +During expression parsing (_not_ evaluation), a map of functions can be given to `govaluate.NewEvaluableExpressionWithFunctions` (the lengthiest and finest of function names). The resultant expression will be able to invoke those functions during evaluation. Once parsed, an expression cannot have functions added or removed - a new expression will need to be created if you want to change the functions, or behavior of said functions. + +Functions always take the form `<name>(<parameters>)`, including parens. Functions can have an empty list of parameters, like `<name>()`, but still must have parens. + +If the expression contains something that looks like it ought to be a function (such as `foo()`), but no such function was given to it, it will error on parsing. + +Functions must be of type `map[string]govaluate.ExpressionFunction`. `ExpressionFunction`, for brevity, has the following signature: + +`func(args ...interface{}) (interface{}, error)` + +Where `args` is whatever is passed to the function when called. If a non-nil error is returned from a function during evaluation, the evaluation stops and ultimately returns that error to the caller of `Evaluate()` or `Eval()`. + +## Built-in functions + +There aren't any builtin functions. The author is opposed to maintaining a standard library of functions to be used. + +Every use case of this library is different, and even in simple use cases (such as parameters, see above) different users need different behavior, naming, or even functionality. The author prefers that users make their own decisions about what functions they need, and how they operate. + +# Equality + +The `==` and `!=` operators involve a moderately complex workflow. They use [`reflect.DeepEqual`](https://golang.org/pkg/reflect/#DeepEqual). This is for complicated reasons, but there are some types in Go that cannot be compared with the native `==` operator. Arrays, in particular, cannot be compared - Go will panic if you try. One might assume this could be handled with the type checking system in `govaluate`, but unfortunately without reflection there is no way to know if a variable is a slice/array. Worse, structs can be incomparable if they _contain incomparable types_. + +It's all very complicated.
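For instance, membership plus the ternary pair from the sections above compose like this; the parameter names and values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/casbin/govaluate"
)

func main() {
	// "in" checks membership; array parameters must be []interface{}.
	// "?" yields the right side when the condition is true (nil otherwise),
	// and ":" supplies the fallback for the nil case.
	expr, err := govaluate.NewEvaluableExpression("role in allowed ? 'grant' : 'deny'")
	if err != nil {
		panic(err)
	}
	out, err := expr.Evaluate(map[string]interface{}{
		"role":    "admin",
		"allowed": []interface{}{"admin", "manager"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // grant
}
```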
Fortunately, Go includes the `reflect.DeepEqual` function to handle all the edge cases. Currently, `govaluate` uses that for all equality/inequality. diff --git a/vendor/github.com/casbin/govaluate/OperatorSymbol.go b/vendor/github.com/casbin/govaluate/OperatorSymbol.go new file mode 100644 index 0000000000..4b810658bd --- /dev/null +++ b/vendor/github.com/casbin/govaluate/OperatorSymbol.go @@ -0,0 +1,309 @@ +package govaluate + +/* + Represents the valid symbols for operators. + +*/ +type OperatorSymbol int + +const ( + VALUE OperatorSymbol = iota + LITERAL + NOOP + EQ + NEQ + GT + LT + GTE + LTE + REQ + NREQ + IN + + AND + OR + + PLUS + MINUS + BITWISE_AND + BITWISE_OR + BITWISE_XOR + BITWISE_LSHIFT + BITWISE_RSHIFT + MULTIPLY + DIVIDE + MODULUS + EXPONENT + + NEGATE + INVERT + BITWISE_NOT + + TERNARY_TRUE + TERNARY_FALSE + COALESCE + + FUNCTIONAL + ACCESS + SEPARATE +) + +type operatorPrecedence int + +const ( + noopPrecedence operatorPrecedence = iota + valuePrecedence + functionalPrecedence + prefixPrecedence + exponentialPrecedence + additivePrecedence + bitwisePrecedence + bitwiseShiftPrecedence + multiplicativePrecedence + comparatorPrecedence + ternaryPrecedence + logicalAndPrecedence + logicalOrPrecedence + separatePrecedence +) + +func findOperatorPrecedenceForSymbol(symbol OperatorSymbol) operatorPrecedence { + + switch symbol { + case NOOP: + return noopPrecedence + case VALUE: + return valuePrecedence + case EQ: + fallthrough + case NEQ: + fallthrough + case GT: + fallthrough + case LT: + fallthrough + case GTE: + fallthrough + case LTE: + fallthrough + case REQ: + fallthrough + case NREQ: + fallthrough + case IN: + return comparatorPrecedence + case AND: + return logicalAndPrecedence + case OR: + return logicalOrPrecedence + case BITWISE_AND: + fallthrough + case BITWISE_OR: + fallthrough + case BITWISE_XOR: + return bitwisePrecedence + case BITWISE_LSHIFT: + fallthrough + case BITWISE_RSHIFT: + return bitwiseShiftPrecedence + case PLUS: + fallthrough + case MINUS: + return additivePrecedence + case MULTIPLY: + fallthrough + case DIVIDE: + fallthrough + case MODULUS: + return multiplicativePrecedence + case EXPONENT: + return exponentialPrecedence + case BITWISE_NOT: + fallthrough + case NEGATE: + fallthrough + case INVERT: + return prefixPrecedence + case COALESCE: + fallthrough + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + return ternaryPrecedence + case ACCESS: + fallthrough + case FUNCTIONAL: + return functionalPrecedence + case SEPARATE: + return separatePrecedence + } + + return valuePrecedence +} + +/* + Map of all valid comparators, and their string equivalents. + Used during parsing of expressions to determine if a symbol is, in fact, a comparator. + Also used during evaluation to determine exactly which comparator is being used. 
+*/ +var comparatorSymbols = map[string]OperatorSymbol{ + "==": EQ, + "!=": NEQ, + ">": GT, + ">=": GTE, + "<": LT, + "<=": LTE, + "=~": REQ, + "!~": NREQ, + "in": IN, +} + +var logicalSymbols = map[string]OperatorSymbol{ + "&&": AND, + "||": OR, +} + +var bitwiseSymbols = map[string]OperatorSymbol{ + "^": BITWISE_XOR, + "&": BITWISE_AND, + "|": BITWISE_OR, +} + +var bitwiseShiftSymbols = map[string]OperatorSymbol{ + ">>": BITWISE_RSHIFT, + "<<": BITWISE_LSHIFT, +} + +var additiveSymbols = map[string]OperatorSymbol{ + "+": PLUS, + "-": MINUS, +} + +var multiplicativeSymbols = map[string]OperatorSymbol{ + "*": MULTIPLY, + "/": DIVIDE, + "%": MODULUS, +} + +var exponentialSymbolsS = map[string]OperatorSymbol{ + "**": EXPONENT, +} + +var prefixSymbols = map[string]OperatorSymbol{ + "-": NEGATE, + "!": INVERT, + "~": BITWISE_NOT, +} + +var ternarySymbols = map[string]OperatorSymbol{ + "?": TERNARY_TRUE, + ":": TERNARY_FALSE, + "??": COALESCE, +} + +// this is defined separately from additiveSymbols et al because it's needed for parsing, not stage planning. +var modifierSymbols = map[string]OperatorSymbol{ + "+": PLUS, + "-": MINUS, + "*": MULTIPLY, + "/": DIVIDE, + "%": MODULUS, + "**": EXPONENT, + "&": BITWISE_AND, + "|": BITWISE_OR, + "^": BITWISE_XOR, + ">>": BITWISE_RSHIFT, + "<<": BITWISE_LSHIFT, +} + +var separatorSymbols = map[string]OperatorSymbol{ + ",": SEPARATE, +} + +/* + Returns true if this operator is contained by the given array of candidate symbols. + False otherwise. +*/ +func (this OperatorSymbol) IsModifierType(candidate []OperatorSymbol) bool { + + for _, symbolType := range candidate { + if this == symbolType { + return true + } + } + + return false +} + +/* + Generally used when formatting type check errors. + We could store the stringified symbol somewhere else and not require a duplicated codeblock to translate + OperatorSymbol to string, but that would require more memory, and another field somewhere. + Adding operators is rare enough that we just stringify it here instead. +*/ +func (this OperatorSymbol) String() string { + + switch this { + case NOOP: + return "NOOP" + case VALUE: + return "VALUE" + case EQ: + return "=" + case NEQ: + return "!=" + case GT: + return ">" + case LT: + return "<" + case GTE: + return ">=" + case LTE: + return "<=" + case REQ: + return "=~" + case NREQ: + return "!~" + case AND: + return "&&" + case OR: + return "||" + case IN: + return "in" + case BITWISE_AND: + return "&" + case BITWISE_OR: + return "|" + case BITWISE_XOR: + return "^" + case BITWISE_LSHIFT: + return "<<" + case BITWISE_RSHIFT: + return ">>" + case PLUS: + return "+" + case MINUS: + return "-" + case MULTIPLY: + return "*" + case DIVIDE: + return "/" + case MODULUS: + return "%" + case EXPONENT: + return "**" + case NEGATE: + return "-" + case INVERT: + return "!" + case BITWISE_NOT: + return "~" + case TERNARY_TRUE: + return "?" + case TERNARY_FALSE: + return ":" + case COALESCE: + return "??" 
+ } + return "" +} diff --git a/vendor/github.com/casbin/govaluate/README.md b/vendor/github.com/casbin/govaluate/README.md new file mode 100644 index 0000000000..576a9df196 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/README.md @@ -0,0 +1,232 @@ +govaluate +==== + +[![Build Status](https://github.com/casbin/govaluate/actions/workflows/build.yml/badge.svg)](https://github.com/casbin/govaluate/actions/workflows/build.yml) +[![Godoc](https://godoc.org/github.com/casbin/govaluate?status.svg)](https://pkg.go.dev/github.com/casbin/govaluate) +[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/govaluate)](https://goreportcard.com/report/github.com/casbin/govaluate) + +Provides support for evaluating arbitrary C-like artithmetic/string expressions. + +Why can't you just write these expressions in code? +-- + +Sometimes, you can't know ahead-of-time what an expression will look like, or you want those expressions to be configurable. +Perhaps you've got a set of data running through your application, and you want to allow your users to specify some validations to run on it before committing it to a database. Or maybe you've written a monitoring framework which is capable of gathering a bunch of metrics, then evaluating a few expressions to see if any metrics should be alerted upon, but the conditions for alerting are different for each monitor. + +A lot of people wind up writing their own half-baked style of evaluation language that fits their needs, but isn't complete. Or they wind up baking the expression into the actual executable, even if they know it's subject to change. These strategies may work, but they take time to implement, time for users to learn, and induce technical debt as requirements change. This library is meant to cover all the normal C-like expressions, so that you don't have to reinvent one of the oldest wheels on a computer. + +How do I use it? +-- + +You create a new EvaluableExpression, then call "Evaluate" on it. + +```go + expression, err := govaluate.NewEvaluableExpression("10 > 0"); + result, err := expression.Evaluate(nil); + // result is now set to "true", the bool value. +``` + +Cool, but how about with parameters? + +```go + expression, err := govaluate.NewEvaluableExpression("foo > 0"); + + parameters := make(map[string]interface{}, 8) + parameters["foo"] = -1; + + result, err := expression.Evaluate(parameters); + // result is now set to "false", the bool value. +``` + +That's cool, but we can almost certainly have done all that in code. What about a complex use case that involves some math? + +```go + expression, err := govaluate.NewEvaluableExpression("(requests_made * requests_succeeded / 100) >= 90"); + + parameters := make(map[string]interface{}, 8) + parameters["requests_made"] = 100; + parameters["requests_succeeded"] = 80; + + result, err := expression.Evaluate(parameters); + // result is now set to "false", the bool value. +``` + +Or maybe you want to check the status of an alive check ("smoketest") page, which will be a string? + +```go + expression, err := govaluate.NewEvaluableExpression("http_response_body == 'service is ok'"); + + parameters := make(map[string]interface{}, 8) + parameters["http_response_body"] = "service is ok"; + + result, err := expression.Evaluate(parameters); + // result is now set to "true", the bool value. +``` + +These examples have all returned boolean values, but it's equally possible to return numeric ones. 
+ +```go + expression, err := govaluate.NewEvaluableExpression("(mem_used / total_mem) * 100"); + + parameters := make(map[string]interface{}, 8) + parameters["total_mem"] = 1024; + parameters["mem_used"] = 512; + + result, err := expression.Evaluate(parameters); + // result is now set to "50.0", the float64 value. +``` + +You can also do date parsing, though the formats are somewhat limited. Stick to RF3339, ISO8061, unix date, or ruby date formats. If you're having trouble getting a date string to parse, check the list of formats actually used: [parsing.go:248](https://github.com/casbin/govaluate/blob/0580e9b47a69125afa0e4ebd1cf93c49eb5a43ec/parsing.go#L258). + +```go + expression, err := govaluate.NewEvaluableExpression("'2014-01-02' > '2014-01-01 23:59:59'"); + result, err := expression.Evaluate(nil); + + // result is now set to true +``` + +Expressions are parsed once, and can be re-used multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once. Like so; + +```go + expression, err := govaluate.NewEvaluableExpression("response_time <= 100"); + parameters := make(map[string]interface{}, 8) + + for { + parameters["response_time"] = pingSomething(); + result, err := expression.Evaluate(parameters) + } +``` + +The normal C-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parenthesis to clarify which portions of an expression should be run first. + +Escaping characters +-- + +Sometimes you'll have parameters that have spaces, slashes, pluses, ampersands or some other character +that this library interprets as something special. For example, the following expression will not +act as one might expect: + + "response-time < 100" + +As written, the library will parse it as "[response] minus [time] is less than 100". In reality, +"response-time" is meant to be one variable that just happens to have a dash in it. + +There are two ways to work around this. First, you can escape the entire parameter name: + + "[response-time] < 100" + +Or you can use backslashes to escape only the minus sign. + + "response\\-time < 100" + +Backslashes can be used anywhere in an expression to escape the very next character. Square bracketed parameter names can be used instead of plain parameter names at any time. + +Functions +-- + +You may have cases where you want to call a function on a parameter during execution of the expression. Perhaps you want to aggregate some set of data, but don't know the exact aggregation you want to use until you're writing the expression itself. Or maybe you have a mathematical operation you want to perform, for which there is no operator; like `log` or `tan` or `sqrt`. For cases like this, you can provide a map of functions to `NewEvaluableExpressionWithFunctions`, which will then be able to use them during execution. 
For instance: + +```go + functions := map[string]govaluate.ExpressionFunction { + "strlen": func(args ...interface{}) (interface{}, error) { + length := len(args[0].(string)) + return (float64)(length), nil + }, + } + + expString := "strlen('someReallyLongInputString') <= 16" + expression, _ := govaluate.NewEvaluableExpressionWithFunctions(expString, functions) + + result, _ := expression.Evaluate(nil) + // result is now "false", the boolean value +``` + +Functions can accept any number of arguments, nested function calls are handled correctly, and arguments can be of any type (even if none of this library's operators support evaluation of that type). For instance, each of these usages of functions in an expression is valid (assuming that the appropriate functions and parameters are given): + +```go +"sqrt(x1 ** y1, x2 ** y2)" +"max(someValue, abs(anotherValue), 10 * lastValue)" +``` + +Functions cannot be passed as parameters; they must be known at the time the expression is parsed, and are unchangeable after parsing. + +Accessors +-- + +If you have structs in your parameters, you can access their fields and methods in the usual way. For instance, given a struct that has a method "Echo", present in the parameters as `foo`, the following is valid: + + "foo.Echo('hello world')" + +Fields are accessed in a similar way. Assuming `foo` has a field called "Length": + + "foo.Length > 9000" + +The values of a `map` are accessed in the same way. Assuming the parameter `foo` is `map[string]int{ "bar": 1 }`: + + "foo.bar == 1" + +Accessors can be nested to any depth, like the following: + + "foo.Bar.Baz.SomeFunction()" + +This may be convenient, but note that using accessors involves a _lot_ of reflection. This makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system). +If at all reasonable, the author recommends extracting the values you care about into a parameter map beforehand, or defining a struct that implements the `Parameters` interface, and which grabs fields as required. If there are functions you want to use, it's better to pass them as expression functions (see the above section). These approaches use no reflection, and are designed to be fast and clean. + +What operators and types does this support? +-- + +* Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<` +* Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~` +* Logical ops: `||` `&&` +* Numeric constants, as 64-bit floating point (`12345.678`) +* String constants (single quotes: `'foobar'`) +* Date constants (single quotes, using any permutation of RFC3339, ISO8601, ruby date, or unix date; date parsing is automatically tried with any string constant) +* Boolean constants: `true` `false` +* Parentheses to control order of evaluation `(` `)` +* Arrays (anything separated by `,` within parentheses: `(1, 2, 'foo')`) +* Prefixes: `!` `-` `~` +* Ternary conditional: `?` `:` +* Null coalescence: `??` + +See [MANUAL.md](https://github.com/casbin/govaluate/blob/master/MANUAL.md) for exacting details on what types each operator supports. + +Types +-- + +Some operators don't make sense when used with some types. For instance, what does it mean to get the modulo of a string? What happens if you check to see if two numbers are logically AND'ed together? + +Everyone has a different intuition about the answers to these questions.
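This library's answer is to return an error rather than guess. A minimal sketch (the error wording below is paraphrased, not quoted from the library):

```go
	expression, err := govaluate.NewEvaluableExpression("'foo' % 2")

	result, err := expression.Evaluate(nil)
	// result is nil; err is non-nil, reporting that 'foo' cannot be
	// used with the modifier '%' because it is not a number.
```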
To prevent confusion, this library will _refuse to operate_ upon types for which there is not an unambiguous meaning for the operation. See [MANUAL.md](https://github.com/casbin/govaluate/blob/master/MANUAL.md) for details about what operators are valid for which types. + +Benchmarks +-- + +If you're concerned about the overhead of this library, a good range of benchmarks is built into this repo. You can run them with `go test -bench=.`. The library is built with an eye towards being quick, but has not been aggressively profiled and optimized. For most applications, though, it is completely fine. + +For a very rough idea of performance, here is the output from a benchmark run on a 3rd-gen Macbook Pro (Linux Mint 17.1). + +``` +BenchmarkSingleParse-12 1000000 1382 ns/op +BenchmarkSimpleParse-12 200000 10771 ns/op +BenchmarkFullParse-12 30000 49383 ns/op +BenchmarkEvaluationSingle-12 50000000 30.1 ns/op +BenchmarkEvaluationNumericLiteral-12 10000000 119 ns/op +BenchmarkEvaluationLiteralModifiers-12 10000000 236 ns/op +BenchmarkEvaluationParameters-12 5000000 260 ns/op +BenchmarkEvaluationParametersModifiers-12 3000000 547 ns/op +BenchmarkComplexExpression-12 2000000 963 ns/op +BenchmarkRegexExpression-12 100000 20357 ns/op +BenchmarkConstantRegexExpression-12 1000000 1392 ns/op +ok +``` + +API Breaks +-- + +While this library has very few cases which will ever result in an API break, it can happen. If you are using this in production, vendor the commit you've tested against, or use gopkg.in to redirect your import (e.g., `import "gopkg.in/casbin/govaluate.v1"`). The master branch _may_ (though infrequently) at some point contain API-breaking changes, and the author will have no way to communicate these to downstreams, other than creating a new major release. + +Releases will explicitly state when an API break happens, and if they do not specify an API break it should be safe to upgrade. + +License +-- + +This project is licensed under the MIT general use license. You're free to integrate, fork, and play with this code as you see fit without consulting the author, as long as you provide proper credit to the author in your works. diff --git a/vendor/github.com/casbin/govaluate/TokenKind.go b/vendor/github.com/casbin/govaluate/TokenKind.go new file mode 100644 index 0000000000..7c9516d2df --- /dev/null +++ b/vendor/github.com/casbin/govaluate/TokenKind.go @@ -0,0 +1,75 @@ +package govaluate + +/* + Represents all valid types of tokens that a token can be. +*/ +type TokenKind int + +const ( + UNKNOWN TokenKind = iota + + PREFIX + NUMERIC + BOOLEAN + STRING + PATTERN + TIME + VARIABLE + FUNCTION + SEPARATOR + ACCESSOR + + COMPARATOR + LOGICALOP + MODIFIER + + CLAUSE + CLAUSE_CLOSE + + TERNARY +) + +/* + String returns a string that describes the given TokenKind. + e.g., when passed the NUMERIC TokenKind, this returns the string "NUMERIC".
+*/ +func (kind TokenKind) String() string { + + switch kind { + + case PREFIX: + return "PREFIX" + case NUMERIC: + return "NUMERIC" + case BOOLEAN: + return "BOOLEAN" + case STRING: + return "STRING" + case PATTERN: + return "PATTERN" + case TIME: + return "TIME" + case VARIABLE: + return "VARIABLE" + case FUNCTION: + return "FUNCTION" + case SEPARATOR: + return "SEPARATOR" + case COMPARATOR: + return "COMPARATOR" + case LOGICALOP: + return "LOGICALOP" + case MODIFIER: + return "MODIFIER" + case CLAUSE: + return "CLAUSE" + case CLAUSE_CLOSE: + return "CLAUSE_CLOSE" + case TERNARY: + return "TERNARY" + case ACCESSOR: + return "ACCESSOR" + } + + return "UNKNOWN" +} diff --git a/vendor/github.com/casbin/govaluate/evaluationStage.go b/vendor/github.com/casbin/govaluate/evaluationStage.go new file mode 100644 index 0000000000..2ea68df3b8 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/evaluationStage.go @@ -0,0 +1,541 @@ +package govaluate + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strings" + "unicode" +) + +const ( + logicalErrorFormat string = "Value '%v' cannot be used with the logical operator '%v', it is not a bool" + modifierErrorFormat string = "Value '%v' cannot be used with the modifier '%v', it is not a number" + comparatorErrorFormat string = "Value '%v' cannot be used with the comparator '%v', it is not a number" + ternaryErrorFormat string = "Value '%v' cannot be used with the ternary operator '%v', it is not a bool" + prefixErrorFormat string = "Value '%v' cannot be used with the prefix '%v'" +) + +type evaluationOperator func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) +type stageTypeCheck func(value interface{}) bool +type stageCombinedTypeCheck func(left interface{}, right interface{}) bool + +type evaluationStage struct { + symbol OperatorSymbol + + leftStage, rightStage *evaluationStage + + // the operation that will be used to evaluate this stage (such as adding [left] to [right] and return the result) + operator evaluationOperator + + // ensures that both left and right values are appropriate for this stage. Returns an error if they aren't operable. + leftTypeCheck stageTypeCheck + rightTypeCheck stageTypeCheck + + // if specified, will override whatever is used in "leftTypeCheck" and "rightTypeCheck". 
+ // primarily used for specific operators that don't care which side a given type is on, but still requires one side to be of a given type + // (like string concat) + typeCheck stageCombinedTypeCheck + + // regardless of which type check is used, this string format will be used as the error message for type errors + typeErrorFormat string +} + +var ( + _true = interface{}(true) + _false = interface{}(false) +) + +func (this *evaluationStage) swapWith(other *evaluationStage) { + + temp := *other + other.setToNonStage(*this) + this.setToNonStage(temp) +} + +func (this *evaluationStage) setToNonStage(other evaluationStage) { + + this.symbol = other.symbol + this.operator = other.operator + this.leftTypeCheck = other.leftTypeCheck + this.rightTypeCheck = other.rightTypeCheck + this.typeCheck = other.typeCheck + this.typeErrorFormat = other.typeErrorFormat +} + +func (this *evaluationStage) isShortCircuitable() bool { + + switch this.symbol { + case AND: + fallthrough + case OR: + fallthrough + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + fallthrough + case COALESCE: + return true + } + + return false +} + +func noopStageRight(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return right, nil +} + +func addStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + // string concat if either are strings + if isString(left) || isString(right) { + return fmt.Sprintf("%v%v", left, right), nil + } + + return left.(float64) + right.(float64), nil +} +func subtractStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) - right.(float64), nil +} +func multiplyStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) * right.(float64), nil +} +func divideStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) / right.(float64), nil +} +func exponentStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return math.Pow(left.(float64), right.(float64)), nil +} +func modulusStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return math.Mod(left.(float64), right.(float64)), nil +} +func gteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) >= right.(string)), nil + } + return boolIface(left.(float64) >= right.(float64)), nil +} +func gtStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) > right.(string)), nil + } + return boolIface(left.(float64) > right.(float64)), nil +} +func lteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) <= right.(string)), nil + } + return boolIface(left.(float64) <= right.(float64)), nil +} +func ltStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) < right.(string)), nil + } + return boolIface(left.(float64) < right.(float64)), nil +} +func equalStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(reflect.DeepEqual(left, right)), nil +} +func notEqualStage(left 
interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(!reflect.DeepEqual(left, right)), nil +} +func andStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(left.(bool) && right.(bool)), nil +} +func orStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(left.(bool) || right.(bool)), nil +} +func negateStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return -right.(float64), nil +} +func invertStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(!right.(bool)), nil +} +func bitwiseNotStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(^int64(right.(float64))), nil +} +func ternaryIfStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if left.(bool) { + return right, nil + } + return nil, nil +} +func ternaryElseStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if left != nil { + return left, nil + } + return right, nil +} + +func regexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + var pattern *regexp.Regexp + var err error + + switch right := right.(type) { + case string: + pattern, err = regexp.Compile(right) + if err != nil { + return nil, fmt.Errorf("Unable to compile regexp pattern '%v': %v", right, err) + } + case *regexp.Regexp: + pattern = right + } + + return pattern.Match([]byte(left.(string))), nil +} + +func notRegexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + ret, err := regexStage(left, right, parameters) + if err != nil { + return nil, err + } + + return !(ret.(bool)), nil +} + +func bitwiseOrStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) | int64(right.(float64))), nil +} +func bitwiseAndStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) & int64(right.(float64))), nil +} +func bitwiseXORStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) ^ int64(right.(float64))), nil +} +func leftShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(uint64(left.(float64)) << uint64(right.(float64))), nil +} +func rightShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(uint64(left.(float64)) >> uint64(right.(float64))), nil +} + +func makeParameterStage(parameterName string) evaluationOperator { + + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + value, err := parameters.Get(parameterName) + if err != nil { + return nil, err + } + + return value, nil + } +} + +func makeLiteralStage(literal interface{}) evaluationOperator { + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return literal, nil + } +} + +func makeFunctionStage(function ExpressionFunction) evaluationOperator { + + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + if right == nil { + return function() + } + + switch right := right.(type) { + case []interface{}: + return 
function(right...) + default: + return function(right) + } + } +} + +func typeConvertParam(p reflect.Value, t reflect.Type) (ret reflect.Value, err error) { + defer func() { + if r := recover(); r != nil { + errorMsg := fmt.Sprintf("Argument type conversion failed: failed to convert '%s' to '%s'", p.Kind().String(), t.Kind().String()) + err = errors.New(errorMsg) + ret = p + } + }() + + return p.Convert(t), nil +} + +func typeConvertParams(method reflect.Value, params []reflect.Value) ([]reflect.Value, error) { + + methodType := method.Type() + numIn := methodType.NumIn() + numParams := len(params) + + if numIn != numParams { + if numIn > numParams { + return nil, fmt.Errorf("Too few arguments to parameter call: got %d arguments, expected %d", len(params), numIn) + } + return nil, fmt.Errorf("Too many arguments to parameter call: got %d arguments, expected %d", len(params), numIn) + } + + for i := 0; i < numIn; i++ { + t := methodType.In(i) + p := params[i] + pt := p.Type() + + if t.Kind() != pt.Kind() { + np, err := typeConvertParam(p, t) + if err != nil { + return nil, err + } + params[i] = np + } + } + + return params, nil +} + +func makeAccessorStage(pair []string) evaluationOperator { + + reconstructed := strings.Join(pair, ".") + + return func(left interface{}, right interface{}, parameters Parameters) (ret interface{}, err error) { + + var params []reflect.Value + + value, err := parameters.Get(pair[0]) + if err != nil { + return nil, err + } + + // while this library generally tries to handle panic-inducing cases on its own, + // accessors are a sticky case which have a lot of possible ways to fail. + // therefore every call to an accessor sets up a defer that tries to recover from panics, converting them to errors. + defer func() { + if r := recover(); r != nil { + errorMsg := fmt.Sprintf("Failed to access '%s': %v", reconstructed, r.(string)) + err = errors.New(errorMsg) + ret = nil + } + }() + + LOOP: + for i := 1; i < len(pair); i++ { + + coreValue := reflect.ValueOf(value) + + var corePtrVal reflect.Value + + // if this is a pointer, resolve it. 
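// (illustrative note, not from the upstream source: the pointer itself is
// kept as corePtrVal because methods declared on a pointer receiver are only
// reachable through the pointer; it is consulted below when the dereferenced
// value has no matching method.)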
+ if coreValue.Kind() == reflect.Ptr { + corePtrVal = coreValue + coreValue = coreValue.Elem() + } + + var field reflect.Value + var method reflect.Value + + switch coreValue.Kind() { + case reflect.Struct: + // check if field is exported + firstCharacter := getFirstRune(pair[i]) + if unicode.ToUpper(firstCharacter) != firstCharacter { + errorMsg := fmt.Sprintf("Unable to access unexported field '%s' in '%s'", pair[i], pair[i-1]) + return nil, errors.New(errorMsg) + } + + field = coreValue.FieldByName(pair[i]) + if field != (reflect.Value{}) { + value = field.Interface() + continue LOOP + } + + method = coreValue.MethodByName(pair[i]) + if method == (reflect.Value{}) { + if corePtrVal.IsValid() { + method = corePtrVal.MethodByName(pair[i]) + } + } + case reflect.Map: + field = coreValue.MapIndex(reflect.ValueOf(pair[i])) + if field != (reflect.Value{}) { + inter := field.Interface() + if reflect.TypeOf(inter).Kind() == reflect.Func { + method = reflect.ValueOf(inter) + } else { + value = inter + continue LOOP + } + } + default: + return nil, errors.New("Unable to access '" + pair[i] + "', '" + pair[i-1] + "' is not a struct or map") + } + + if method == (reflect.Value{}) { + return nil, errors.New("No method or field '" + pair[i] + "' present on parameter '" + pair[i-1] + "'") + } + + switch right := right.(type) { + case []interface{}: + + givenParams := right + params = make([]reflect.Value, len(givenParams)) + for idx := range givenParams { + params[idx] = reflect.ValueOf(givenParams[idx]) + } + + default: + + if right == nil { + params = []reflect.Value{} + break + } + + params = []reflect.Value{reflect.ValueOf(right)} + } + + params, err = typeConvertParams(method, params) + + if err != nil { + return nil, errors.New("Method call failed - '" + pair[0] + "." + pair[1] + "': " + err.Error()) + } + + returned := method.Call(params) + retLength := len(returned) + + if retLength == 0 { + return nil, errors.New("Method call '" + pair[i-1] + "." + pair[i] + "' did not return any values.") + } + + if retLength == 1 { + + value = returned[0].Interface() + continue + } + + if retLength == 2 { + + errIface := returned[1].Interface() + err, validType := errIface.(error) + + if validType && errIface != nil { + return returned[0].Interface(), err + } + + value = returned[0].Interface() + continue + } + + return nil, errors.New("Method call '" + pair[0] + "." + pair[1] + "' did not return either one value, or a value and an error. 
Cannot interpret meaning.") + } + + value = castToFloat64(value) + return value, nil + } +} + +func separatorStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + var ret []interface{} + + switch left := left.(type) { + case []interface{}: + ret = append(left, right) + default: + ret = []interface{}{left, right} + } + + return ret, nil +} + +func inStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + for _, value := range right.([]interface{}) { + if left == value { + return true, nil + } + } + return false, nil +} + +// + +func isString(value interface{}) bool { + + switch value.(type) { + case string: + return true + } + return false +} + +func isRegexOrString(value interface{}) bool { + + switch value.(type) { + case string: + return true + case *regexp.Regexp: + return true + } + return false +} + +func isBool(value interface{}) bool { + switch value.(type) { + case bool: + return true + } + return false +} + +func isFloat64(value interface{}) bool { + switch value.(type) { + case float64: + return true + } + return false +} + +/* +Addition usually means between numbers, but can also mean string concat. +String concat needs one (or both) of the sides to be a string. +*/ +func additionTypeCheck(left interface{}, right interface{}) bool { + + if isFloat64(left) && isFloat64(right) { + return true + } + if !isString(left) && !isString(right) { + return false + } + return true +} + +/* +Comparison can either be between numbers, or lexicographic between two strings, +but never between the two. +*/ +func comparatorTypeCheck(left interface{}, right interface{}) bool { + + if isFloat64(left) && isFloat64(right) { + return true + } + if isString(left) && isString(right) { + return true + } + return false +} + +func isArray(value interface{}) bool { + switch value.(type) { + case []interface{}: + return true + } + return false +} + +/* +Converting a boolean to an interface{} requires an allocation. +We can use interned bools to avoid this cost. +*/ +func boolIface(b bool) interface{} { + if b { + return _true + } + return _false +} diff --git a/vendor/github.com/casbin/govaluate/expressionFunctions.go b/vendor/github.com/casbin/govaluate/expressionFunctions.go new file mode 100644 index 0000000000..ac6592b3f7 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/expressionFunctions.go @@ -0,0 +1,8 @@ +package govaluate + +/* + Represents a function that can be called from within an expression. + This method must return an error if, for any reason, it is unable to produce exactly one unambiguous result. + An error returned will halt execution of the expression. +*/ +type ExpressionFunction func(arguments ...interface{}) (interface{}, error) diff --git a/vendor/github.com/casbin/govaluate/expressionOutputStream.go b/vendor/github.com/casbin/govaluate/expressionOutputStream.go new file mode 100644 index 0000000000..88a8416392 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/expressionOutputStream.go @@ -0,0 +1,46 @@ +package govaluate + +import ( + "bytes" +) + +/* + Holds a series of "transactions" which represent each token as it is output by an outputter (such as ToSQLQuery()). + Some outputs (such as SQL) require a function call or non-c-like syntax to represent an expression. + To accomplish this, this struct keeps track of each translated token as it is output, and can return and rollback those transactions. 
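	An illustrative example (not part of the upstream comment): after add("a")
	and add("b"), rollback() returns "b" and leaves only "a" recorded, so
	createString(" ") would then return just "a".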
+*/ +type expressionOutputStream struct { + transactions []string +} + +func (this *expressionOutputStream) add(transaction string) { + this.transactions = append(this.transactions, transaction) +} + +func (this *expressionOutputStream) rollback() string { + + index := len(this.transactions) - 1 + ret := this.transactions[index] + + this.transactions = this.transactions[:index] + return ret +} + +func (this *expressionOutputStream) createString(delimiter string) string { + + var retBuffer bytes.Buffer + var transaction string + + penultimate := len(this.transactions) - 1 + + for i := 0; i < penultimate; i++ { + + transaction = this.transactions[i] + + retBuffer.WriteString(transaction) + retBuffer.WriteString(delimiter) + } + retBuffer.WriteString(this.transactions[penultimate]) + + return retBuffer.String() +} diff --git a/vendor/github.com/casbin/govaluate/lexerState.go b/vendor/github.com/casbin/govaluate/lexerState.go new file mode 100644 index 0000000000..6726e909e1 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/lexerState.go @@ -0,0 +1,373 @@ +package govaluate + +import ( + "errors" + "fmt" +) + +type lexerState struct { + isEOF bool + isNullable bool + kind TokenKind + validNextKinds []TokenKind +} + +// lexer states. +// Constant for all purposes except compiler. +var validLexerStates = []lexerState{ + + lexerState{ + kind: UNKNOWN, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + PATTERN, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + }, + }, + + lexerState{ + + kind: CLAUSE, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + PATTERN, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + + lexerState{ + + kind: CLAUSE_CLOSE, + isEOF: true, + isNullable: true, + validNextKinds: []TokenKind{ + + COMPARATOR, + MODIFIER, + NUMERIC, + BOOLEAN, + VARIABLE, + STRING, + PATTERN, + TIME, + CLAUSE, + CLAUSE_CLOSE, + LOGICALOP, + TERNARY, + SEPARATOR, + }, + }, + + lexerState{ + + kind: NUMERIC, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: BOOLEAN, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: STRING, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: TIME, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: PATTERN, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: VARIABLE, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: MODIFIER, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + BOOLEAN, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + lexerState{ + + kind: COMPARATOR, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, 
+ FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + PATTERN, + }, + }, + lexerState{ + + kind: LOGICALOP, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + lexerState{ + + kind: PREFIX, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + + lexerState{ + + kind: TERNARY, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + STRING, + TIME, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: FUNCTION, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + CLAUSE, + }, + }, + lexerState{ + + kind: ACCESSOR, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + CLAUSE, + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: SEPARATOR, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + STRING, + TIME, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + }, + }, +} + +func (this lexerState) canTransitionTo(kind TokenKind) bool { + + for _, validKind := range this.validNextKinds { + + if validKind == kind { + return true + } + } + + return false +} + +func checkExpressionSyntax(tokens []ExpressionToken) error { + + var state lexerState + var lastToken ExpressionToken + var err error + + state = validLexerStates[0] + + for _, token := range tokens { + + if !state.canTransitionTo(token.Kind) { + + // call out a specific error for tokens looking like they want to be functions. 
+ if lastToken.Kind == VARIABLE && token.Kind == CLAUSE { + return errors.New("Undefined function " + lastToken.Value.(string)) + } + + firstStateName := fmt.Sprintf("%s [%v]", state.kind.String(), lastToken.Value) + nextStateName := fmt.Sprintf("%s [%v]", token.Kind.String(), token.Value) + + return errors.New("Cannot transition token types from " + firstStateName + " to " + nextStateName) + } + + state, err = getLexerStateForToken(token.Kind) + if err != nil { + return err + } + + if !state.isNullable && token.Value == nil { + + errorMsg := fmt.Sprintf("Token kind '%v' cannot have a nil value", token.Kind.String()) + return errors.New(errorMsg) + } + + lastToken = token + } + + if !state.isEOF { + return errors.New("Unexpected end of expression") + } + return nil +} + +func getLexerStateForToken(kind TokenKind) (lexerState, error) { + + for _, possibleState := range validLexerStates { + + if possibleState.kind == kind { + return possibleState, nil + } + } + + errorMsg := fmt.Sprintf("No lexer state found for token kind '%v'\n", kind.String()) + return validLexerStates[0], errors.New(errorMsg) +} diff --git a/vendor/github.com/casbin/govaluate/lexerStream.go b/vendor/github.com/casbin/govaluate/lexerStream.go new file mode 100644 index 0000000000..c6ed76ec40 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/lexerStream.go @@ -0,0 +1,37 @@ +package govaluate + +type lexerStream struct { + source []rune + position int + length int +} + +func newLexerStream(source string) *lexerStream { + + var ret *lexerStream + var runes []rune + + for _, character := range source { + runes = append(runes, character) + } + + ret = new(lexerStream) + ret.source = runes + ret.length = len(runes) + return ret +} + +func (this *lexerStream) readCharacter() rune { + + character := this.source[this.position] + this.position += 1 + return character +} + +func (this *lexerStream) rewind(amount int) { + this.position -= amount +} + +func (this lexerStream) canRead() bool { + return this.position < this.length +} diff --git a/vendor/github.com/casbin/govaluate/parameters.go b/vendor/github.com/casbin/govaluate/parameters.go new file mode 100644 index 0000000000..6c5b9ecb53 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/parameters.go @@ -0,0 +1,32 @@ +package govaluate + +import ( + "errors" +) + +/* + Parameters is a collection of named parameters that can be used by an EvaluableExpression to retrieve parameters + when an expression tries to use them. +*/ +type Parameters interface { + + /* + Get gets the parameter of the given name, or an error if the parameter is unavailable. + Failure to find the given parameter should be indicated by returning an error. + */ + Get(name string) (interface{}, error) +} + +type MapParameters map[string]interface{} + +func (p MapParameters) Get(name string) (interface{}, error) { + + value, found := p[name] + + if !found { + errorMessage := "No parameter '" + name + "' found." 
+ return nil, errors.New(errorMessage) + } + + return value, nil +} diff --git a/vendor/github.com/casbin/govaluate/parsing.go b/vendor/github.com/casbin/govaluate/parsing.go new file mode 100644 index 0000000000..dae78f7d2e --- /dev/null +++ b/vendor/github.com/casbin/govaluate/parsing.go @@ -0,0 +1,509 @@ +package govaluate + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" + "unicode" +) + +func parseTokens(expression string, functions map[string]ExpressionFunction) ([]ExpressionToken, error) { + + var ret []ExpressionToken + var token ExpressionToken + var stream *lexerStream + var state lexerState + var err error + var found bool + + stream = newLexerStream(expression) + state = validLexerStates[0] + + for stream.canRead() { + + token, err, found = readToken(stream, state, functions) + + if err != nil { + return ret, err + } + + if !found { + break + } + + state, err = getLexerStateForToken(token.Kind) + if err != nil { + return ret, err + } + + // append this valid token + ret = append(ret, token) + } + + err = checkBalance(ret) + if err != nil { + return nil, err + } + + return ret, nil +} + +func readToken(stream *lexerStream, state lexerState, functions map[string]ExpressionFunction) (ExpressionToken, error, bool) { + + var function ExpressionFunction + var ret ExpressionToken + var tokenValue interface{} + var tokenTime time.Time + var tokenString string + var kind TokenKind + var character rune + var found bool + var completed bool + var err error + + // numeric is 0-9, or . or 0x followed by digits + // string starts with ' + // variable is alphanumeric, always starts with a letter + // bracket always means variable + // symbols are anything non-alphanumeric + // all others read into a buffer until they reach the end of the stream + for stream.canRead() { + + character = stream.readCharacter() + + if unicode.IsSpace(character) { + continue + } + + // numeric constant + if isNumeric(character) { + + if stream.canRead() && character == '0' { + character = stream.readCharacter() + + if stream.canRead() && character == 'x' { + tokenString, _ = readUntilFalse(stream, false, true, true, isHexDigit) + tokenValueInt, err := strconv.ParseUint(tokenString, 16, 64) + + if err != nil { + errorMsg := fmt.Sprintf("Unable to parse hex value '%v' to uint64\n", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + + kind = NUMERIC + tokenValue = float64(tokenValueInt) + break + } else { + stream.rewind(1) + } + } + + tokenString = readTokenUntilFalse(stream, isNumeric) + tokenValue, err = strconv.ParseFloat(tokenString, 64) + + if err != nil { + errorMsg := fmt.Sprintf("Unable to parse numeric value '%v' to float64\n", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + kind = NUMERIC + break + } + + // comma, separator + if character == ',' { + + tokenValue = "," + kind = SEPARATOR + break + } + + // escaped variable + if character == '[' { + + tokenValue, completed = readUntilFalse(stream, true, false, true, isNotClosingBracket) + kind = VARIABLE + + if !completed { + return ExpressionToken{}, errors.New("Unclosed parameter bracket"), false + } + + // above method normally rewinds us to the closing bracket, which we want to skip. + stream.rewind(-1) + break + } + + // regular variable - or function? + if unicode.IsLetter(character) { + + tokenString = readTokenUntilFalse(stream, isVariableName) + + tokenValue = tokenString + kind = VARIABLE + + // boolean? 
+ if tokenValue == "true" { + + kind = BOOLEAN + tokenValue = true + } else { + + if tokenValue == "false" { + + kind = BOOLEAN + tokenValue = false + } + } + + // textual operator? + if tokenValue == "in" || tokenValue == "IN" { + + // force lower case for consistency + tokenValue = "in" + kind = COMPARATOR + } + + // function? + function, found = functions[tokenString] + if found { + kind = FUNCTION + tokenValue = function + } + + // accessor? + accessorIndex := strings.Index(tokenString, ".") + if accessorIndex > 0 { + + // check that it doesn't end with a hanging period + if tokenString[len(tokenString)-1] == '.' { + errorMsg := fmt.Sprintf("Hanging accessor on token '%s'", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + + kind = ACCESSOR + splits := strings.Split(tokenString, ".") + tokenValue = splits + } + break + } + + if !isNotQuote(character) { + tokenValue, completed = readUntilFalse(stream, true, false, true, isNotQuote) + + if !completed { + return ExpressionToken{}, errors.New("Unclosed string literal"), false + } + + // advance the stream one position, since reading until false assumes the terminator is a real token + stream.rewind(-1) + + // check to see if this can be parsed as a time. + tokenTime, found = tryParseTime(tokenValue.(string)) + if found { + kind = TIME + tokenValue = tokenTime + } else { + kind = STRING + } + break + } + + if character == '(' { + tokenValue = character + kind = CLAUSE + break + } + + if character == ')' { + tokenValue = character + kind = CLAUSE_CLOSE + break + } + + // must be a known symbol + tokenString = readTokenUntilFalse(stream, isNotAlphanumeric) + tokenValue = tokenString + + // quick hack for the case where "-" can mean "prefixed negation" or "minus", which are used + // very differently. + if state.canTransitionTo(PREFIX) { + _, found = prefixSymbols[tokenString] + if found { + + kind = PREFIX + break + } + } + _, found = modifierSymbols[tokenString] + if found { + + kind = MODIFIER + break + } + + _, found = logicalSymbols[tokenString] + if found { + + kind = LOGICALOP + break + } + + _, found = comparatorSymbols[tokenString] + if found { + + kind = COMPARATOR + break + } + + _, found = ternarySymbols[tokenString] + if found { + + kind = TERNARY + break + } + + errorMessage := fmt.Sprintf("Invalid token: '%s'", tokenString) + return ret, errors.New(errorMessage), false + } + + ret.Kind = kind + ret.Value = tokenValue + + return ret, nil, (kind != UNKNOWN) +} + +func readTokenUntilFalse(stream *lexerStream, condition func(rune) bool) string { + + var ret string + + stream.rewind(1) + ret, _ = readUntilFalse(stream, false, true, true, condition) + return ret +} + +/* +Returns the string that was read until the given [condition] was false, or whitespace was broken. +Returns false if the stream ended before whitespace was broken or condition was met. 
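	An illustrative case (not part of the upstream comment): reading "foo> 1"
	with the isVariableName condition consumes "foo", rewinds one character so
	the stream is left pointing at '>', and returns ("foo", true).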
+*/ +func readUntilFalse(stream *lexerStream, includeWhitespace bool, breakWhitespace bool, allowEscaping bool, condition func(rune) bool) (string, bool) { + + var tokenBuffer bytes.Buffer + var character rune + var conditioned bool + + conditioned = false + + for stream.canRead() { + + character = stream.readCharacter() + + // Use backslashes to escape anything + if allowEscaping && character == '\\' { + + character = stream.readCharacter() + tokenBuffer.WriteString(string(character)) + continue + } + + if unicode.IsSpace(character) { + + if breakWhitespace && tokenBuffer.Len() > 0 { + conditioned = true + break + } + if !includeWhitespace { + continue + } + } + + if condition(character) { + tokenBuffer.WriteString(string(character)) + } else { + conditioned = true + stream.rewind(1) + break + } + } + + return tokenBuffer.String(), conditioned +} + +/* +Checks to see if any optimizations can be performed on the given [tokens], which form a complete, valid expression. +The returns slice will represent the optimized (or unmodified) list of tokens to use. +*/ +func optimizeTokens(tokens []ExpressionToken) ([]ExpressionToken, error) { + + var token ExpressionToken + var symbol OperatorSymbol + var err error + var index int + + for index, token = range tokens { + + // if we find a regex operator, and the right-hand value is a constant, precompile and replace with a pattern. + if token.Kind != COMPARATOR { + continue + } + + symbol = comparatorSymbols[token.Value.(string)] + if symbol != REQ && symbol != NREQ { + continue + } + + index++ + token = tokens[index] + if token.Kind == STRING { + + token.Kind = PATTERN + token.Value, err = regexp.Compile(token.Value.(string)) + + if err != nil { + return tokens, err + } + + tokens[index] = token + } + } + return tokens, nil +} + +/* +Checks the balance of tokens which have multiple parts, such as parenthesis. +*/ +func checkBalance(tokens []ExpressionToken) error { + + var stream *tokenStream + var token ExpressionToken + var parens int + + stream = newTokenStream(tokens) + + for stream.hasNext() { + + token = stream.next() + if token.Kind == CLAUSE { + parens++ + continue + } + if token.Kind == CLAUSE_CLOSE { + parens-- + continue + } + } + + if parens != 0 { + return errors.New("Unbalanced parenthesis") + } + return nil +} + +func isHexDigit(character rune) bool { + + character = unicode.ToLower(character) + + return unicode.IsDigit(character) || + character == 'a' || + character == 'b' || + character == 'c' || + character == 'd' || + character == 'e' || + character == 'f' +} + +func isNumeric(character rune) bool { + + return unicode.IsDigit(character) || character == '.' +} + +func isNotQuote(character rune) bool { + + return character != '\'' && character != '"' +} + +func isNotAlphanumeric(character rune) bool { + + return !(unicode.IsDigit(character) || + unicode.IsLetter(character) || + character == '(' || + character == ')' || + character == '[' || + character == ']' || // starting to feel like there needs to be an `isOperation` func (#59) + !isNotQuote(character)) +} + +func isVariableName(character rune) bool { + + return unicode.IsLetter(character) || + unicode.IsDigit(character) || + character == '_' || + character == '.' +} + +func isNotClosingBracket(character rune) bool { + + return character != ']' +} + +/* +Attempts to parse the [candidate] as a Time. +Tries a series of standardized date formats, returns the Time if one applies, +otherwise returns false through the second return. 
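	For example (illustrative): tryParseTime("2014-01-02") succeeds via the
	"2006-01-02" layout and returns (that date, true), while an input that
	matches no layout returns (time.Now(), false).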
+*/ +func tryParseTime(candidate string) (time.Time, bool) { + + var ret time.Time + var found bool + + timeFormats := [...]string{ + time.ANSIC, + time.UnixDate, + time.RubyDate, + time.Kitchen, + time.RFC3339, + time.RFC3339Nano, + "2006-01-02", // RFC 3339 + "2006-01-02 15:04", // RFC 3339 with minutes + "2006-01-02 15:04:05", // RFC 3339 with seconds + "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone + "2006-01-02T15Z0700", // ISO8601 with hour + "2006-01-02T15:04Z0700", // ISO8601 with minutes + "2006-01-02T15:04:05Z0700", // ISO8601 with seconds + "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds + } + + for _, format := range timeFormats { + + ret, found = tryParseExactTime(candidate, format) + if found { + return ret, true + } + } + + return time.Now(), false +} + +func tryParseExactTime(candidate string, format string) (time.Time, bool) { + + var ret time.Time + var err error + + ret, err = time.ParseInLocation(format, candidate, time.Local) + if err != nil { + return time.Now(), false + } + + return ret, true +} + +func getFirstRune(candidate string) rune { + + for _, character := range candidate { + return character + } + + return 0 +} diff --git a/vendor/github.com/casbin/govaluate/sanitizedParameters.go b/vendor/github.com/casbin/govaluate/sanitizedParameters.go new file mode 100644 index 0000000000..b254bff6aa --- /dev/null +++ b/vendor/github.com/casbin/govaluate/sanitizedParameters.go @@ -0,0 +1,43 @@ +package govaluate + +// sanitizedParameters is a wrapper for Parameters that does sanitization as +// parameters are accessed. +type sanitizedParameters struct { + orig Parameters +} + +func (p sanitizedParameters) Get(key string) (interface{}, error) { + value, err := p.orig.Get(key) + if err != nil { + return nil, err + } + + return castToFloat64(value), nil +} + +func castToFloat64(value interface{}) interface{} { + switch value := value.(type) { + case uint8: + return float64(value) + case uint16: + return float64(value) + case uint32: + return float64(value) + case uint64: + return float64(value) + case int8: + return float64(value) + case int16: + return float64(value) + case int32: + return float64(value) + case int64: + return float64(value) + case int: + return float64(value) + case float32: + return float64(value) + } + + return value +} diff --git a/vendor/github.com/casbin/govaluate/stagePlanner.go b/vendor/github.com/casbin/govaluate/stagePlanner.go new file mode 100644 index 0000000000..400a2879ba --- /dev/null +++ b/vendor/github.com/casbin/govaluate/stagePlanner.go @@ -0,0 +1,728 @@ +package govaluate + +import ( + "errors" + "fmt" + "time" +) + +var stageSymbolMap = map[OperatorSymbol]evaluationOperator{ + EQ: equalStage, + NEQ: notEqualStage, + GT: gtStage, + LT: ltStage, + GTE: gteStage, + LTE: lteStage, + REQ: regexStage, + NREQ: notRegexStage, + AND: andStage, + OR: orStage, + IN: inStage, + BITWISE_OR: bitwiseOrStage, + BITWISE_AND: bitwiseAndStage, + BITWISE_XOR: bitwiseXORStage, + BITWISE_LSHIFT: leftShiftStage, + BITWISE_RSHIFT: rightShiftStage, + PLUS: addStage, + MINUS: subtractStage, + MULTIPLY: multiplyStage, + DIVIDE: divideStage, + MODULUS: modulusStage, + EXPONENT: exponentStage, + NEGATE: negateStage, + INVERT: invertStage, + BITWISE_NOT: bitwiseNotStage, + TERNARY_TRUE: ternaryIfStage, + TERNARY_FALSE: ternaryElseStage, + COALESCE: ternaryElseStage, + SEPARATE: separatorStage, +} + +/* +A "precedent" is a function which will recursively parse new evaluateionStages from a given stream of tokens. 
+It's called a `precedent` because it is expected to handle exactly what precedence of operator, +and defer to other `precedent`s for other operators. +*/ +type precedent func(stream *tokenStream) (*evaluationStage, error) + +/* +A convenience function for specifying the behavior of a `precedent`. +Most `precedent` functions can be described by the same function, just with different type checks, symbols, and error formats. +This struct is passed to `makePrecedentFromPlanner` to create a `precedent` function. +*/ +type precedencePlanner struct { + validSymbols map[string]OperatorSymbol + validKinds []TokenKind + + typeErrorFormat string + + next precedent + nextRight precedent +} + +var planPrefix precedent +var planExponential precedent +var planMultiplicative precedent +var planAdditive precedent +var planBitwise precedent +var planShift precedent +var planComparator precedent +var planLogicalAnd precedent +var planLogicalOr precedent +var planTernary precedent +var planSeparator precedent + +func init() { + + // all these stages can use the same code (in `planPrecedenceLevel`) to execute, + // they simply need different type checks, symbols, and recursive precedents. + // While not all precedent phases are listed here, most can be represented this way. + planPrefix = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: prefixSymbols, + validKinds: []TokenKind{PREFIX}, + typeErrorFormat: prefixErrorFormat, + nextRight: planFunction, + }) + planExponential = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: exponentialSymbolsS, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planFunction, + }) + planMultiplicative = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: multiplicativeSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planExponential, + }) + planAdditive = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: additiveSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planMultiplicative, + }) + planShift = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: bitwiseShiftSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planAdditive, + }) + planBitwise = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: bitwiseSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planShift, + }) + planComparator = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: comparatorSymbols, + validKinds: []TokenKind{COMPARATOR}, + typeErrorFormat: comparatorErrorFormat, + next: planBitwise, + }) + planLogicalAnd = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: map[string]OperatorSymbol{"&&": AND}, + validKinds: []TokenKind{LOGICALOP}, + typeErrorFormat: logicalErrorFormat, + next: planComparator, + }) + planLogicalOr = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: map[string]OperatorSymbol{"||": OR}, + validKinds: []TokenKind{LOGICALOP}, + typeErrorFormat: logicalErrorFormat, + next: planLogicalAnd, + }) + planTernary = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: ternarySymbols, + validKinds: []TokenKind{TERNARY}, + typeErrorFormat: ternaryErrorFormat, + next: planLogicalOr, + }) + planSeparator = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: separatorSymbols, + validKinds: []TokenKind{SEPARATOR}, + next: planTernary, + }) +} + +/* +Given a planner, creates a function which 
will evaluate a specific precedence level of operators, +and link it to other `precedent`s which recurse to parse other precedence levels. +*/ +func makePrecedentFromPlanner(planner *precedencePlanner) precedent { + + var generated precedent + var nextRight precedent + + generated = func(stream *tokenStream) (*evaluationStage, error) { + return planPrecedenceLevel( + stream, + planner.typeErrorFormat, + planner.validSymbols, + planner.validKinds, + nextRight, + planner.next, + ) + } + + if planner.nextRight != nil { + nextRight = planner.nextRight + } else { + nextRight = generated + } + + return generated +} + +/* +Creates a `evaluationStageList` object which represents an execution plan (or tree) +which is used to completely evaluate a set of tokens at evaluation-time. +The three stages of evaluation can be thought of as parsing strings to tokens, then tokens to a stage list, then evaluation with parameters. +*/ +func planStages(tokens []ExpressionToken) (*evaluationStage, error) { + + stream := newTokenStream(tokens) + + stage, err := planTokens(stream) + if err != nil { + return nil, err + } + + // while we're now fully-planned, we now need to re-order same-precedence operators. + // this could probably be avoided with a different planning method + reorderStages(stage) + + stage = elideLiterals(stage) + return stage, nil +} + +func planTokens(stream *tokenStream) (*evaluationStage, error) { + + if !stream.hasNext() { + return nil, nil + } + + return planSeparator(stream) +} + +/* +The most usual method of parsing an evaluation stage for a given precedence. +Most stages use the same logic +*/ +func planPrecedenceLevel( + stream *tokenStream, + typeErrorFormat string, + validSymbols map[string]OperatorSymbol, + validKinds []TokenKind, + rightPrecedent precedent, + leftPrecedent precedent) (*evaluationStage, error) { + + var token ExpressionToken + var symbol OperatorSymbol + var leftStage, rightStage *evaluationStage + var checks typeChecks + var err error + var keyFound bool + + if leftPrecedent != nil { + + leftStage, err = leftPrecedent(stream) + if err != nil { + return nil, err + } + } + + rewind := func() (*evaluationStage, error) { + stream.rewind() + return leftStage, nil + } + + if stream.hasNext() { + + token = stream.next() + + if len(validKinds) > 0 { + + keyFound = false + for _, kind := range validKinds { + if kind == token.Kind { + keyFound = true + break + } + } + + if !keyFound { + return rewind() + } + } + + if validSymbols != nil { + + if !isString(token.Value) { + return rewind() + } + + symbol, keyFound = validSymbols[token.Value.(string)] + if !keyFound { + return rewind() + } + } + + if rightPrecedent != nil { + rightStage, err = rightPrecedent(stream) + if err != nil { + return nil, err + } + } + + checks = findTypeChecks(symbol) + + return &evaluationStage{ + + symbol: symbol, + leftStage: leftStage, + rightStage: rightStage, + operator: stageSymbolMap[symbol], + + leftTypeCheck: checks.left, + rightTypeCheck: checks.right, + typeCheck: checks.combined, + typeErrorFormat: typeErrorFormat, + }, nil + } + + return rewind() +} + +/* +A special case where functions need to be of higher precedence than values, and need a special wrapped execution stage operator. 
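The layering is easiest to see in miniature. Below is a self-contained sketch (an editor's illustration, not govaluate code) in which each precedence level consumes its own operators and defers operands to the next, tighter level; `planPrecedenceLevel` does the same job generically, right-recursing and relying on `reorderStages` to restore left-to-right order afterwards:

```go
package main

import "fmt"

// parser is a toy two-level precedence parser: additive defers to
// multiplicative, which defers to plain values, so "*" binds tighter than "+".
type parser struct {
	tokens []string
	pos    int
}

func (p *parser) peek() string {
	if p.pos < len(p.tokens) {
		return p.tokens[p.pos]
	}
	return ""
}

func (p *parser) parseAdditive() string {
	left := p.parseMultiplicative()
	for p.peek() == "+" {
		p.pos++
		left = fmt.Sprintf("(%s + %s)", left, p.parseMultiplicative())
	}
	return left
}

func (p *parser) parseMultiplicative() string {
	left := p.parseValue()
	for p.peek() == "*" {
		p.pos++
		left = fmt.Sprintf("(%s * %s)", left, p.parseValue())
	}
	return left
}

func (p *parser) parseValue() string {
	v := p.tokens[p.pos]
	p.pos++
	return v
}

func main() {
	p := &parser{tokens: []string{"1", "+", "2", "*", "3"}}
	fmt.Println(p.parseAdditive()) // (1 + (2 * 3))
}
```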
+*/ +func planFunction(stream *tokenStream) (*evaluationStage, error) { + + var token ExpressionToken + var rightStage *evaluationStage + var err error + + token = stream.next() + + if token.Kind != FUNCTION { + stream.rewind() + return planAccessor(stream) + } + + rightStage, err = planAccessor(stream) + if err != nil { + return nil, err + } + + return &evaluationStage{ + + symbol: FUNCTIONAL, + rightStage: rightStage, + operator: makeFunctionStage(token.Value.(ExpressionFunction)), + typeErrorFormat: "Unable to run function '%v': %v", + }, nil +} + +func planAccessor(stream *tokenStream) (*evaluationStage, error) { + + var token, otherToken ExpressionToken + var rightStage *evaluationStage + var err error + + if !stream.hasNext() { + return nil, nil + } + + token = stream.next() + + if token.Kind != ACCESSOR { + stream.rewind() + return planValue(stream) + } + + // check if this is meant to be a function or a field. + // fields have a clause next to them, functions do not. + // if it's a function, parse the arguments. Otherwise leave the right stage null. + if stream.hasNext() { + + otherToken = stream.next() + if otherToken.Kind == CLAUSE { + + stream.rewind() + + rightStage, err = planTokens(stream) + if err != nil { + return nil, err + } + } else { + stream.rewind() + } + } + + return &evaluationStage{ + + symbol: ACCESS, + rightStage: rightStage, + operator: makeAccessorStage(token.Value.([]string)), + typeErrorFormat: "Unable to access parameter field or method '%v': %v", + }, nil +} + +/* +A truly special precedence function, this handles all the "lowest-case" errata of the process, including literals, parmeters, +clauses, and prefixes. +*/ +func planValue(stream *tokenStream) (*evaluationStage, error) { + + var token ExpressionToken + var symbol OperatorSymbol + var ret *evaluationStage + var operator evaluationOperator + var err error + + if !stream.hasNext() { + return nil, nil + } + + token = stream.next() + + switch token.Kind { + + case CLAUSE: + + ret, err = planTokens(stream) + if err != nil { + return nil, err + } + + // advance past the CLAUSE_CLOSE token. We know that it's a CLAUSE_CLOSE, because at parse-time we check for unbalanced parens. + stream.next() + + // the stage we got represents all of the logic contained within the parens + // but for technical reasons, we need to wrap this stage in a "noop" stage which breaks long chains of precedence. + // see github #33. + ret = &evaluationStage{ + rightStage: ret, + operator: noopStageRight, + symbol: NOOP, + } + + return ret, nil + + case CLAUSE_CLOSE: + + // when functions have empty params, this will be hit. In this case, we don't have any evaluation stage to do, + // so we just return nil so that the stage planner continues on its way. 
+ stream.rewind() + return nil, nil + + case VARIABLE: + operator = makeParameterStage(token.Value.(string)) + + case NUMERIC: + fallthrough + case STRING: + fallthrough + case PATTERN: + fallthrough + case BOOLEAN: + symbol = LITERAL + operator = makeLiteralStage(token.Value) + case TIME: + symbol = LITERAL + operator = makeLiteralStage(float64(token.Value.(time.Time).Unix())) + + case PREFIX: + stream.rewind() + return planPrefix(stream) + } + + if operator == nil { + errorMsg := fmt.Sprintf("Unable to plan token kind: '%s', value: '%v'", token.Kind.String(), token.Value) + return nil, errors.New(errorMsg) + } + + return &evaluationStage{ + symbol: symbol, + operator: operator, + }, nil +} + +/* +Convenience function to pass a triplet of typechecks between `findTypeChecks` and `planPrecedenceLevel`. +Each of these members may be nil, which indicates that type does not matter for that value. +*/ +type typeChecks struct { + left stageTypeCheck + right stageTypeCheck + combined stageCombinedTypeCheck +} + +/* +Maps a given [symbol] to a set of typechecks to be used during runtime. +*/ +func findTypeChecks(symbol OperatorSymbol) typeChecks { + + switch symbol { + case GT: + fallthrough + case LT: + fallthrough + case GTE: + fallthrough + case LTE: + return typeChecks{ + combined: comparatorTypeCheck, + } + case REQ: + fallthrough + case NREQ: + return typeChecks{ + left: isString, + right: isRegexOrString, + } + case AND: + fallthrough + case OR: + return typeChecks{ + left: isBool, + right: isBool, + } + case IN: + return typeChecks{ + right: isArray, + } + case BITWISE_LSHIFT: + fallthrough + case BITWISE_RSHIFT: + fallthrough + case BITWISE_OR: + fallthrough + case BITWISE_AND: + fallthrough + case BITWISE_XOR: + return typeChecks{ + left: isFloat64, + right: isFloat64, + } + case PLUS: + return typeChecks{ + combined: additionTypeCheck, + } + case MINUS: + fallthrough + case MULTIPLY: + fallthrough + case DIVIDE: + fallthrough + case MODULUS: + fallthrough + case EXPONENT: + return typeChecks{ + left: isFloat64, + right: isFloat64, + } + case NEGATE: + return typeChecks{ + right: isFloat64, + } + case INVERT: + return typeChecks{ + right: isBool, + } + case BITWISE_NOT: + return typeChecks{ + right: isFloat64, + } + case TERNARY_TRUE: + return typeChecks{ + left: isBool, + } + + // unchecked cases + case EQ: + fallthrough + case NEQ: + return typeChecks{} + case TERNARY_FALSE: + fallthrough + case COALESCE: + fallthrough + default: + return typeChecks{} + } +} + +/* +During stage planning, stages of equal precedence are parsed such that they'll be evaluated in reverse order. +For commutative operators like "+" or "-", it's no big deal. But for order-specific operators, it ruins the expected result. +*/ +func reorderStages(rootStage *evaluationStage) { + + // traverse every rightStage until we find multiples in a row of the same precedence. 
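+	// Runs of equal precedence are collected in identicalPrecedences; whenever
+	// the precedence changes (and once more after the walk ends), any run longer
+	// than one stage is handed to mirrorStageSubtree, which reverses it so that
+	// order-sensitive operators such as "-" and "/" evaluate left to right.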
+ var identicalPrecedences []*evaluationStage + var currentStage, nextStage *evaluationStage + var precedence, currentPrecedence operatorPrecedence + + nextStage = rootStage + precedence = findOperatorPrecedenceForSymbol(rootStage.symbol) + + for nextStage != nil { + + currentStage = nextStage + nextStage = currentStage.rightStage + + // left depth first, since this entire method only looks for precedences down the right side of the tree + if currentStage.leftStage != nil { + reorderStages(currentStage.leftStage) + } + + currentPrecedence = findOperatorPrecedenceForSymbol(currentStage.symbol) + + if currentPrecedence == precedence { + identicalPrecedences = append(identicalPrecedences, currentStage) + continue + } + + // precedence break. + // See how many in a row we had, and reorder if there's more than one. + if len(identicalPrecedences) > 1 { + mirrorStageSubtree(identicalPrecedences) + } + + identicalPrecedences = []*evaluationStage{currentStage} + precedence = currentPrecedence + } + + if len(identicalPrecedences) > 1 { + mirrorStageSubtree(identicalPrecedences) + } +} + +/* +Performs a "mirror" on a subtree of stages. +This mirror functionally inverts the order of execution for all members of the [stages] list. +That list is assumed to be a root-to-leaf (ordered) list of evaluation stages, where each is a right-hand stage of the last. +*/ +func mirrorStageSubtree(stages []*evaluationStage) { + + var rootStage, inverseStage, carryStage, frontStage *evaluationStage + + stagesLength := len(stages) + + // reverse all right/left + for _, frontStage = range stages { + + carryStage = frontStage.rightStage + frontStage.rightStage = frontStage.leftStage + frontStage.leftStage = carryStage + } + + // end left swaps with root right + rootStage = stages[0] + frontStage = stages[stagesLength-1] + + carryStage = frontStage.leftStage + frontStage.leftStage = rootStage.rightStage + rootStage.rightStage = carryStage + + // for all non-root non-end stages, right is swapped with inverse stage right in list + for i := 0; i < (stagesLength-2)/2+1; i++ { + + frontStage = stages[i+1] + inverseStage = stages[stagesLength-i-1] + + carryStage = frontStage.rightStage + frontStage.rightStage = inverseStage.rightStage + inverseStage.rightStage = carryStage + } + + // swap all other information with inverse stages + for i := 0; i < stagesLength/2; i++ { + + frontStage = stages[i] + inverseStage = stages[stagesLength-i-1] + frontStage.swapWith(inverseStage) + } +} + +/* +Recurses through all operators in the entire tree, eliding operators where both sides are literals. +*/ +func elideLiterals(root *evaluationStage) *evaluationStage { + + if root.leftStage != nil { + root.leftStage = elideLiterals(root.leftStage) + } + + if root.rightStage != nil { + root.rightStage = elideLiterals(root.rightStage) + } + + return elideStage(root) +} + +/* +Elides a specific stage, if possible. +Returns the unmodified [root] stage if it cannot or should not be elided. +Otherwise, returns a new stage representing the condensed value from the elided stages. +*/ +func elideStage(root *evaluationStage) *evaluationStage { + + var leftValue, rightValue, result interface{} + var err error + + // right side must be a non-nil value. Left side must be nil or a value. 
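+	// (In practice the checks below require both sides to be non-nil LITERAL
+	// stages; any other shape is returned unmodified.)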
+ if root.rightStage == nil || + root.rightStage.symbol != LITERAL || + root.leftStage == nil || + root.leftStage.symbol != LITERAL { + return root + } + + // don't elide some operators + switch root.symbol { + case SEPARATE: + fallthrough + case IN: + return root + } + + // both sides are values, get their actual values. + // errors should be near-impossible here. If we encounter them, just abort this optimization. + leftValue, err = root.leftStage.operator(nil, nil, nil) + if err != nil { + return root + } + + rightValue, err = root.rightStage.operator(nil, nil, nil) + if err != nil { + return root + } + + // typecheck, since the grammar checker is a bit loose with which operator symbols go together. + err = typeCheck(root.leftTypeCheck, leftValue, root.symbol, root.typeErrorFormat) + if err != nil { + return root + } + + err = typeCheck(root.rightTypeCheck, rightValue, root.symbol, root.typeErrorFormat) + if err != nil { + return root + } + + if root.typeCheck != nil && !root.typeCheck(leftValue, rightValue) { + return root + } + + // pre-calculate, and return a new stage representing the result. + result, err = root.operator(leftValue, rightValue, nil) + if err != nil { + return root + } + + return &evaluationStage{ + symbol: LITERAL, + operator: makeLiteralStage(result), + } +} diff --git a/vendor/github.com/casbin/govaluate/test.sh b/vendor/github.com/casbin/govaluate/test.sh new file mode 100644 index 0000000000..11aa8b3323 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/test.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Script that runs tests, code coverage, and benchmarks all at once. +# Builds a symlink in /tmp, mostly to avoid messing with GOPATH at the user's shell level. + +TEMPORARY_PATH="/tmp/govaluate_test" +SRC_PATH="${TEMPORARY_PATH}/src" +FULL_PATH="${TEMPORARY_PATH}/src/govaluate" + +# set up temporary directory +rm -rf "${FULL_PATH}" +mkdir -p "${SRC_PATH}" + +ln -s $(pwd) "${FULL_PATH}" +export GOPATH="${TEMPORARY_PATH}" + +pushd "${TEMPORARY_PATH}/src/govaluate" + +# run the actual tests. +export GOVALUATE_TORTURE_TEST="true" +go test -bench=. -benchmem #-coverprofile coverage.out +status=$? + +if [ "${status}" != 0 ]; +then + exit $status +fi + +# coverage +# disabled because travis go1.4 seems not to support it suddenly?
+#go tool cover -func=coverage.out + +popd diff --git a/vendor/github.com/casbin/govaluate/tokenStream.go b/vendor/github.com/casbin/govaluate/tokenStream.go new file mode 100644 index 0000000000..7c7c40abda --- /dev/null +++ b/vendor/github.com/casbin/govaluate/tokenStream.go @@ -0,0 +1,30 @@ +package govaluate + +type tokenStream struct { + tokens []ExpressionToken + index int + tokenLength int +} + +func newTokenStream(tokens []ExpressionToken) *tokenStream { + ret := new(tokenStream) + ret.tokens = tokens + ret.tokenLength = len(tokens) + return ret +} + +func (this *tokenStream) rewind() { + this.index -= 1 +} + +func (this *tokenStream) next() ExpressionToken { + token := this.tokens[this.index] + + this.index += 1 + return token +} + +func (this tokenStream) hasNext() bool { + + return this.index < this.tokenLength +} diff --git a/vendor/github.com/casbin/xorm-adapter/v2/.gitignore b/vendor/github.com/casbin/xorm-adapter/v2/.gitignore new file mode 100644 index 0000000000..d7c6275ef8 --- /dev/null +++ b/vendor/github.com/casbin/xorm-adapter/v2/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +.idea/ +*.iml \ No newline at end of file diff --git a/vendor/github.com/casbin/xorm-adapter/v2/.releaserc.json b/vendor/github.com/casbin/xorm-adapter/v2/.releaserc.json new file mode 100644 index 0000000000..aaa0e82f50 --- /dev/null +++ b/vendor/github.com/casbin/xorm-adapter/v2/.releaserc.json @@ -0,0 +1,17 @@ +{ + "debug": true, + "branches": [ + "+([0-9])?(.{+([0-9]),x}).x", + "master", + { + "name": "beta", + "prerelease": true + } + ], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/github" + ] + } + \ No newline at end of file diff --git a/vendor/github.com/casbin/xorm-adapter/v2/LICENSE b/vendor/github.com/casbin/xorm-adapter/v2/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/casbin/xorm-adapter/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/casbin/xorm-adapter/v2/Makefile b/vendor/github.com/casbin/xorm-adapter/v2/Makefile new file mode 100644 index 0000000000..bd30a5a7d6 --- /dev/null +++ b/vendor/github.com/casbin/xorm-adapter/v2/Makefile @@ -0,0 +1,7 @@ +default: lint test + +test: + go test -race -v . 
+ +lint: + golangci-lint run --verbose diff --git a/vendor/github.com/casbin/xorm-adapter/v2/README.md b/vendor/github.com/casbin/xorm-adapter/v2/README.md new file mode 100644 index 0000000000..dfea5c039d --- /dev/null +++ b/vendor/github.com/casbin/xorm-adapter/v2/README.md @@ -0,0 +1,110 @@ +Xorm Adapter +[![Go](https://github.com/casbin/xorm-adapter/actions/workflows/ci.yml/badge.svg)](https://github.com/casbin/xorm-adapter/actions/workflows/ci.yml) +[![Coverage Status](https://coveralls.io/repos/github/casbin/xorm-adapter/badge.svg?branch=master)](https://coveralls.io/github/casbin/xorm-adapter?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/xorm-adapter)](https://goreportcard.com/report/github.com/casbin/xorm-adapter) +[![Godoc](https://godoc.org/github.com/casbin/xorm-adapter?status.svg)](https://godoc.org/github.com/casbin/xorm-adapter) +--- + +Xorm Adapter is the [Xorm](https://gitea.com/xorm/xorm) adapter for [Casbin](https://github.com/casbin/casbin). With this library, Casbin can load policy from Xorm supported database or save policy to it. + +Based on [Xorm Drivers Support](https://gitea.com/xorm/xorm#drivers-support), The current supported databases are: + +- Mysql: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) +- MyMysql: [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) +- Postgres: [github.com/lib/pq](https://github.com/lib/pq) +- Tidb: [github.com/pingcap/tidb](https://github.com/pingcap/tidb) +- SQLite: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) +- MsSql: [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) +- Oracle: [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experiment) + +## Installation + + go get github.com/casbin/xorm-adapter + +## Simple MySQL Example + +```go +package main + +import ( + "github.com/casbin/casbin/v2" + _ "github.com/go-sql-driver/mysql" + + "github.com/casbin/xorm-adapter/v2" +) + +func main() { + // Initialize a Xorm adapter and use it in a Casbin enforcer: + // The adapter will use the MySQL database named "casbin". + // If it doesn't exist, the adapter will create it automatically. + a, _ := xormadapter.NewAdapter("mysql", "mysql_username:mysql_password@tcp(127.0.0.1:3306)/") // Your driver and data source. + + // Or you can use an existing DB "abc" like this: + // The adapter will use the table named "casbin_rule". + // If it doesn't exist, the adapter will create it automatically. + // a := xormadapter.NewAdapter("mysql", "mysql_username:mysql_password@tcp(127.0.0.1:3306)/abc", true) + + e, _ := casbin.NewEnforcer("examples/rbac_model.conf", a) + + // Load the policy from DB. + e.LoadPolicy() + + // Check the permission. + e.Enforce("alice", "data1", "read") + + // Modify the policy. + // e.AddPolicy(...) + // e.RemovePolicy(...) + + // Save the policy back to DB. + e.SavePolicy() +} +``` + +## Simple Postgres Example + +```go +package main + +import ( + "github.com/casbin/casbin/v2" + _ "github.com/lib/pq" + + "github.com/casbin/xorm-adapter" +) + +func main() { + // Initialize a Xorm adapter and use it in a Casbin enforcer: + // The adapter will use the Postgres database named "casbin". + // If it doesn't exist, the adapter will create it automatically. + a, _ := xormadapter.NewAdapter("postgres", "user=postgres_username password=postgres_password host=127.0.0.1 port=5432 sslmode=disable") // Your driver and data source. 
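+	// Note: with dbSpecified omitted (false), the adapter first connects with
+	// "dbname=postgres" appended to this source string to create the "casbin"
+	// database if needed, then reconnects with "dbname=casbin" appended.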
+ + // Or you can use an existing DB "abc" like this: + // The adapter will use the table named "casbin_rule". + // If it doesn't exist, the adapter will create it automatically. + // a := xormadapter.NewAdapter("postgres", "dbname=abc user=postgres_username password=postgres_password host=127.0.0.1 port=5432 sslmode=disable", true) + + e, _ := casbin.NewEnforcer("../examples/rbac_model.conf", a) + + // Load the policy from DB. + e.LoadPolicy() + + // Check the permission. + e.Enforce("alice", "data1", "read") + + // Modify the policy. + // e.AddPolicy(...) + // e.RemovePolicy(...) + + // Save the policy back to DB. + e.SavePolicy() +} +``` + +## Getting Help + +- [Casbin](https://github.com/casbin/casbin) + +## License + +This project is under Apache 2.0 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/casbin/xorm-adapter/v2/adapter.go b/vendor/github.com/casbin/xorm-adapter/v2/adapter.go new file mode 100644 index 0000000000..e5944cb049 --- /dev/null +++ b/vendor/github.com/casbin/xorm-adapter/v2/adapter.go @@ -0,0 +1,614 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xormadapter + +import ( + "errors" + "log" + "runtime" + "strings" + + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + "github.com/lib/pq" + "xorm.io/xorm" +) + +// TableName if tableName=="" , adapter will use default tablename "casbin_rule". +func (the *CasbinRule) TableName() string { + if len(the.tableName) == 0 { + return "casbin_rule" + } + return the.tableName +} + +// CasbinRule . +type CasbinRule struct { + PType string `xorm:"varchar(100) index not null default ''"` + V0 string `xorm:"varchar(100) index not null default ''"` + V1 string `xorm:"varchar(100) index not null default ''"` + V2 string `xorm:"varchar(100) index not null default ''"` + V3 string `xorm:"varchar(100) index not null default ''"` + V4 string `xorm:"varchar(100) index not null default ''"` + V5 string `xorm:"varchar(100) index not null default ''"` + + tableName string `xorm:"-"` +} + +// Adapter represents the Xorm adapter for policy storage. +type Adapter struct { + driverName string + dataSourceName string + dbSpecified bool + isFiltered bool + engine *xorm.Engine + tablePrefix string + tableName string +} + +// Filter . +type Filter struct { + PType []string + V0 []string + V1 []string + V2 []string + V3 []string + V4 []string + V5 []string +} + +// finalizer is the destructor for Adapter. +func finalizer(a *Adapter) { + if a.engine == nil { + return + } + + err := a.engine.Close() + if err != nil { + log.Printf("close xorm adapter engine failed, err: %v", err) + } +} + +// NewAdapter is the constructor for Adapter. +// dbSpecified is an optional bool parameter. The default value is false. +// It's up to whether you have specified an existing DB in dataSourceName. +// If dbSpecified == true, you need to make sure the DB in dataSourceName exists. 
+// If dbSpecified == false, the adapter will automatically create a DB named "casbin". +func NewAdapter(driverName string, dataSourceName string, dbSpecified ...bool) (*Adapter, error) { + a := &Adapter{ + driverName: driverName, + dataSourceName: dataSourceName, + } + + if len(dbSpecified) == 0 { + a.dbSpecified = false + } else if len(dbSpecified) == 1 { + a.dbSpecified = dbSpecified[0] + } else { + return nil, errors.New("invalid parameter: dbSpecified") + } + + // Open the DB, create it if not existed. + err := a.open() + if err != nil { + return nil, err + } + + // Call the destructor when the object is released. + runtime.SetFinalizer(a, finalizer) + + return a, nil +} + +// NewAdapterWithTableName . +func NewAdapterWithTableName(driverName string, dataSourceName string, tableName string, tablePrefix string, dbSpecified ...bool) (*Adapter, error) { + a := &Adapter{ + driverName: driverName, + dataSourceName: dataSourceName, + tableName: tableName, + tablePrefix: tablePrefix, + } + + if len(dbSpecified) == 0 { + a.dbSpecified = false + } else if len(dbSpecified) == 1 { + a.dbSpecified = dbSpecified[0] + } else { + return nil, errors.New("invalid parameter: dbSpecified") + } + + // Open the DB, create it if not existed. + err := a.open() + if err != nil { + return nil, err + } + + // Call the destructor when the object is released. + runtime.SetFinalizer(a, finalizer) + + return a, nil +} + +// NewAdapterByEngine . +func NewAdapterByEngine(engine *xorm.Engine) (*Adapter, error) { + a := &Adapter{ + engine: engine, + } + + err := a.createTable() + if err != nil { + return nil, err + } + + return a, nil +} + +// NewAdapterByEngineWithTableName . +func NewAdapterByEngineWithTableName(engine *xorm.Engine, tableName string, tablePrefix string) (*Adapter, error) { + a := &Adapter{ + engine: engine, + tableName: tableName, + tablePrefix: tablePrefix, + } + + err := a.createTable() + if err != nil { + return nil, err + } + + return a, nil +} + +func (a *Adapter) getFullTableName() string { + if a.tablePrefix != "" { + return a.tablePrefix + a.tableName + } + return a.tableName +} + +func (a *Adapter) createDatabase() error { + var err error + var engine *xorm.Engine + if a.driverName == "postgres" { + engine, err = xorm.NewEngine(a.driverName, a.dataSourceName+" dbname=postgres") + } else { + engine, err = xorm.NewEngine(a.driverName, a.dataSourceName) + } + if err != nil { + return err + } + + if a.driverName == "postgres" { + if _, err = engine.Exec("CREATE DATABASE casbin"); err != nil { + // 42P04 is duplicate_database + if pqerr, ok := err.(*pq.Error); ok && pqerr.Code == "42P04" { + _ = engine.Close() + return nil + } + } + } else if a.driverName != "sqlite3" { + _, err = engine.Exec("CREATE DATABASE IF NOT EXISTS casbin") + } + if err != nil { + _ = engine.Close() + return err + } + + return engine.Close() +} + +func (a *Adapter) open() error { + var err error + var engine *xorm.Engine + + if a.dbSpecified { + engine, err = xorm.NewEngine(a.driverName, a.dataSourceName) + if err != nil { + return err + } + } else { + if err = a.createDatabase(); err != nil { + return err + } + + if a.driverName == "postgres" { + engine, err = xorm.NewEngine(a.driverName, a.dataSourceName+" dbname=casbin") + } else if a.driverName == "sqlite3" { + engine, err = xorm.NewEngine(a.driverName, a.dataSourceName) + } else { + engine, err = xorm.NewEngine(a.driverName, a.dataSourceName+"casbin") + } + if err != nil { + return err + } + } + + a.engine = engine + + return a.createTable() +} + +func (a *Adapter) 
createTable() error { + return a.engine.Sync2(&CasbinRule{tableName: a.getFullTableName()}) +} + +func (a *Adapter) dropTable() error { + return a.engine.DropTables(&CasbinRule{tableName: a.getFullTableName()}) +} + +func loadPolicyLine(line *CasbinRule, model model.Model) { + var p = []string{line.PType, + line.V0, line.V1, line.V2, line.V3, line.V4, line.V5} + var lineText string + if line.V5 != "" { + lineText = strings.Join(p, ", ") + } else if line.V4 != "" { + lineText = strings.Join(p[:6], ", ") + } else if line.V3 != "" { + lineText = strings.Join(p[:5], ", ") + } else if line.V2 != "" { + lineText = strings.Join(p[:4], ", ") + } else if line.V1 != "" { + lineText = strings.Join(p[:3], ", ") + } else if line.V0 != "" { + lineText = strings.Join(p[:2], ", ") + } + + persist.LoadPolicyLine(lineText, model) +} + +// LoadPolicy loads policy from database. +func (a *Adapter) LoadPolicy(model model.Model) error { + lines := make([]*CasbinRule, 0, 64) + + if err := a.engine.Table(&CasbinRule{tableName: a.getFullTableName()}).Find(&lines); err != nil { + return err + } + + for _, line := range lines { + loadPolicyLine(line, model) + } + + return nil +} + +func (a *Adapter) genPolicyLine(ptype string, rule []string) *CasbinRule { + line := CasbinRule{PType: ptype, tableName: a.getFullTableName()} + + l := len(rule) + if l > 0 { + line.V0 = rule[0] + } + if l > 1 { + line.V1 = rule[1] + } + if l > 2 { + line.V2 = rule[2] + } + if l > 3 { + line.V3 = rule[3] + } + if l > 4 { + line.V4 = rule[4] + } + if l > 5 { + line.V5 = rule[5] + } + + return &line +} + +// SavePolicy saves policy to database. +func (a *Adapter) SavePolicy(model model.Model) error { + err := a.dropTable() + if err != nil { + return err + } + err = a.createTable() + if err != nil { + return err + } + + lines := make([]*CasbinRule, 0, 64) + + for ptype, ast := range model["p"] { + for _, rule := range ast.Policy { + line := a.genPolicyLine(ptype, rule) + lines = append(lines, line) + } + } + + for ptype, ast := range model["g"] { + for _, rule := range ast.Policy { + line := a.genPolicyLine(ptype, rule) + lines = append(lines, line) + } + } + + // check whether the policy is empty + if len(lines) == 0 { + return nil + } + + _, err = a.engine.Insert(&lines) + + return err +} + +// AddPolicy adds a policy rule to the storage. +func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error { + line := a.genPolicyLine(ptype, rule) + _, err := a.engine.InsertOne(line) + return err +} + +// AddPolicies adds multiple policy rule to the storage. +func (a *Adapter) AddPolicies(sec string, ptype string, rules [][]string) error { + _, err := a.engine.Transaction(func(tx *xorm.Session) (interface{}, error) { + for _, rule := range rules { + line := a.genPolicyLine(ptype, rule) + _, err := tx.InsertOne(line) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + +// RemovePolicy removes a policy rule from the storage. +func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error { + line := a.genPolicyLine(ptype, rule) + _, err := a.engine.Delete(line) + return err +} + +// RemovePolicies removes multiple policy rule from the storage. 
+func (a *Adapter) RemovePolicies(sec string, ptype string, rules [][]string) error { + _, err := a.engine.Transaction(func(tx *xorm.Session) (interface{}, error) { + for _, rule := range rules { + line := a.genPolicyLine(ptype, rule) + _, err := tx.Delete(line) + if err != nil { + return nil, nil + } + } + return nil, nil + }) + return err +} + +// RemoveFilteredPolicy removes policy rules that match the filter from the storage. +func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { + line := CasbinRule{PType: ptype, tableName: a.getFullTableName()} + + idx := fieldIndex + len(fieldValues) + if fieldIndex <= 0 && idx > 0 { + line.V0 = fieldValues[0-fieldIndex] + } + if fieldIndex <= 1 && idx > 1 { + line.V1 = fieldValues[1-fieldIndex] + } + if fieldIndex <= 2 && idx > 2 { + line.V2 = fieldValues[2-fieldIndex] + } + if fieldIndex <= 3 && idx > 3 { + line.V3 = fieldValues[3-fieldIndex] + } + if fieldIndex <= 4 && idx > 4 { + line.V4 = fieldValues[4-fieldIndex] + } + if fieldIndex <= 5 && idx > 5 { + line.V5 = fieldValues[5-fieldIndex] + } + + _, err := a.engine.Delete(&line) + return err +} + +// LoadFilteredPolicy loads only policy rules that match the filter. +func (a *Adapter) LoadFilteredPolicy(model model.Model, filter interface{}) error { + filterValue, ok := filter.(Filter) + if !ok { + return errors.New("invalid filter type") + } + + lines := make([]*CasbinRule, 0, 64) + if err := a.filterQuery(a.engine.NewSession(), filterValue).Table(&CasbinRule{tableName: a.getFullTableName()}).Find(&lines); err != nil { + return err + } + + for _, line := range lines { + loadPolicyLine(line, model) + } + a.isFiltered = true + return nil +} + +// IsFiltered returns true if the loaded policy has been filtered. +func (a *Adapter) IsFiltered() bool { + return a.isFiltered +} + +func (a *Adapter) filterQuery(session *xorm.Session, filter Filter) *xorm.Session { + filterValue := [7]struct { + col string + val []string + }{ + {"p_type", filter.PType}, + {"v0", filter.V0}, + {"v1", filter.V1}, + {"v2", filter.V2}, + {"v3", filter.V3}, + {"v4", filter.V4}, + {"v5", filter.V5}, + } + + for idx := range filterValue { + switch len(filterValue[idx].val) { + case 0: + continue + case 1: + session.And(filterValue[idx].col+" = ?", filterValue[idx].val[0]) + default: + session.In(filterValue[idx].col, filterValue[idx].val) + } + } + + return session +} + +// UpdatePolicy update oldRule to newPolicy permanently +func (a *Adapter) UpdatePolicy(sec string, ptype string, oldRule, newPolicy []string) error { + oRule := a.genPolicyLine(ptype, oldRule) + _, err := a.engine.Update(a.genPolicyLine(ptype, newPolicy), oRule) + return err +} + +// UpdatePolicies updates some policy rules to storage, like db, redis. +func (a *Adapter) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error { + session := a.engine.NewSession() + defer session.Close() + + if err := session.Begin(); err != nil { + return err + } + + for i, oldRule := range oldRules { + nRule, oRule := a.genPolicyLine(ptype, newRules[i]), a.genPolicyLine(ptype, oldRule) + if _, err := session.Update(nRule, oRule); err != nil { + return err + } + } + + return session.Commit() +} + +func (a *Adapter) UpdateFilteredPolicies(sec string, ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { + // UpdateFilteredPolicies deletes old rules and adds new rules. 
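+	// Within a single session, the rows matching the filter below are first
+	// collected (they become the returned old policies), then deleted, and the
+	// new rules are inserted; the session is rolled back on any failure.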
+ line := &CasbinRule{} + + line.PType = ptype + if fieldIndex <= 0 && 0 < fieldIndex+len(fieldValues) { + line.V0 = fieldValues[0-fieldIndex] + } + if fieldIndex <= 1 && 1 < fieldIndex+len(fieldValues) { + line.V1 = fieldValues[1-fieldIndex] + } + if fieldIndex <= 2 && 2 < fieldIndex+len(fieldValues) { + line.V2 = fieldValues[2-fieldIndex] + } + if fieldIndex <= 3 && 3 < fieldIndex+len(fieldValues) { + line.V3 = fieldValues[3-fieldIndex] + } + if fieldIndex <= 4 && 4 < fieldIndex+len(fieldValues) { + line.V4 = fieldValues[4-fieldIndex] + } + if fieldIndex <= 5 && 5 < fieldIndex+len(fieldValues) { + line.V5 = fieldValues[5-fieldIndex] + } + + newP := make([]CasbinRule, 0, len(newPolicies)) + oldP := make([]CasbinRule, 0) + for _, newRule := range newPolicies { + newP = append(newP, *a.genPolicyLine(ptype, newRule)) + } + tx := a.engine.NewSession().Table(&CasbinRule{tableName: a.getFullTableName()}) + defer tx.Close() + + if err := tx.Begin(); err != nil { + return nil, err + } + + for i := range newP { + str, args := line.queryString() + if err := tx.Where(str, args...).Find(&oldP); err != nil { + return nil, tx.Rollback() + } + if _, err := tx.Where(str.(string), args...).Delete(&CasbinRule{tableName: a.getFullTableName()}); err != nil { + return nil, tx.Rollback() + } + if _, err := tx.Insert(&newP[i]); err != nil { + return nil, tx.Rollback() + } + } + + // return deleted rulues + oldPolicies := make([][]string, 0) + for _, v := range oldP { + oldPolicy := v.toStringPolicy() + oldPolicies = append(oldPolicies, oldPolicy) + } + return oldPolicies, tx.Commit() +} + +func (c *CasbinRule) toStringPolicy() []string { + policy := make([]string, 0) + if c.PType != "" { + policy = append(policy, c.PType) + } + if c.V0 != "" { + policy = append(policy, c.V0) + } + if c.V1 != "" { + policy = append(policy, c.V1) + } + if c.V2 != "" { + policy = append(policy, c.V2) + } + if c.V3 != "" { + policy = append(policy, c.V3) + } + if c.V4 != "" { + policy = append(policy, c.V4) + } + if c.V5 != "" { + policy = append(policy, c.V5) + } + return policy +} + +func (c *CasbinRule) queryString() (interface{}, []interface{}) { + queryArgs := []interface{}{c.PType} + + queryStr := "p_type = ?" + if c.V0 != "" { + queryStr += " and v0 = ?" + queryArgs = append(queryArgs, c.V0) + } + if c.V1 != "" { + queryStr += " and v1 = ?" + queryArgs = append(queryArgs, c.V1) + } + if c.V2 != "" { + queryStr += " and v2 = ?" + queryArgs = append(queryArgs, c.V2) + } + if c.V3 != "" { + queryStr += " and v3 = ?" + queryArgs = append(queryArgs, c.V3) + } + if c.V4 != "" { + queryStr += " and v4 = ?" + queryArgs = append(queryArgs, c.V4) + } + if c.V5 != "" { + queryStr += " and v5 = ?" + queryArgs = append(queryArgs, c.V5) + } + + return queryStr, queryArgs +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 0000000000..042091d9b3 --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. 
+testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 0000000000..52ccb5a934 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,18 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Amazon.com, Inc +Damian Gryski +Eric Buth +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Klaus Post +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 0000000000..ea6524ddd0 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,41 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Alex Legg +Damian Gryski +Eric Buth +Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney +Kai Backman +Klaus Post +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 0000000000..6050c10f4c --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 0000000000..cea12879a0 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000000..23c6e26c6b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. 
It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000000..e6179f65e3 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
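Taken together, decode.go above exposes the block format (DecodedLen, Decode) and the stream format (Reader). A minimal sketch of the block format, using a hand-assembled literal-only block; this is illustrative usage of the vendored package, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// A hand-assembled Snappy block: a uvarint decoded-length header
	// followed by one literal element. The tag byte packs (length-1)<<2
	// with the two low bits 00, i.e. tagLiteral.
	block := []byte{
		0x05,                    // uvarint header: decoded length is 5
		(5 - 1) << 2,            // literal tag for a 5-byte literal
		'h', 'e', 'l', 'l', 'o', // the literal bytes
	}

	n, _ := snappy.DecodedLen(block) // reads only the varint header
	got, err := snappy.Decode(nil, block)
	fmt.Println(n, string(got), err) // 5 hello <nil>
}
```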
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
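The fast path described above (always copy 16 bytes and let later iterations overwrite the excess) reads more naturally in Go. A sketch under the same preconditions; fastLiteral is a hypothetical helper, not a function in this package:

```go
// fastLiteral copies a short literal as one fixed-size 16-byte copy.
// Copying past length is harmless on valid input because subsequent
// elements overwrite the overrun, exactly as the asm comments explain.
func fastLiteral(dst, src []byte, d, s, length int) bool {
	if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
		return false // caller falls back to a plain bounds-checked copy
	}
	copy(dst[d:d+16], src[s:s+16]) // always 16 bytes, not length bytes
	return true
}
```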
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0.
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s new file mode 100644 index 0000000000..7a3ead17ea --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -0,0 +1,494 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 + +loop: + // for s < len(src) + CMP R13, R6 + BEQ end + + // R4 = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R6, R6 + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R4 == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. 
It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // case x == 60: + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R6), R4 + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R6), R4 + B doLit + +tagLit62Plus: + CMPW $62, R4 + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R6), R4 + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. 
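Before reading the copy-tag cases below, it may help to see the three encodings spelled out in Go. This mirrors the corresponding cases in decode_other.go; copyTagParams is a hypothetical helper and bounds checks are omitted:

```go
// copyTagParams decodes the length/offset packing of the three copy tags.
// src[i] is the tag byte; size is the total element size in bytes.
func copyTagParams(src []byte, i int) (length, offset, size int) {
	switch src[i] & 0x03 {
	case 0x01: // tagCopy1: 3 length bits, 11 offset bits
		length = 4 + int(src[i])>>2&0x7
		offset = int(uint32(src[i])&0xe0<<3 | uint32(src[i+1]))
		size = 2
	case 0x02: // tagCopy2: 6 length bits, 16-bit little-endian offset
		length = 1 + int(src[i])>>2
		offset = int(uint32(src[i+1]) | uint32(src[i+2])<<8)
		size = 3
	case 0x03: // tagCopy4: 6 length bits, 32-bit little-endian offset
		length = 1 + int(src[i])>>2
		offset = int(uint32(src[i+1]) | uint32(src[i+2])<<8 |
			uint32(src[i+3])<<16 | uint32(src[i+4])<<24)
		size = 5
	}
	return length, offset, size
}
```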
+ +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R6), R5 + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R6), R5 + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R4 == length && R4 > 0 + // - R5 == offset + + // if offset <= 0 { etc } + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt + + // if d < offset { etc } + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt + + // if length > len(dst)-d { etc } + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMP $16, R4 + BGT slowForwardCopy + CMP $8, R5 + BLT slowForwardCopy + CMP $16, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. 
For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUB $10, R14, R14
+ CMP R14, R4
+ BGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMP $8, R5
+ BGE fixUpSlowForwardCopy
+ MOVD (R15), R3
+ MOVD R3, (R7)
+ SUB R5, R4, R4
+ ADD R5, R7, R7
+ ADD R5, R5, R5
+ B makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by R7 being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save R7 to R2 so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVD R7, R2
+ ADD R4, R7, R7
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ MOVD $0, R1
+ CMP R1, R4
+ BLE loop
+ MOVD (R15), R3
+ MOVD R3, (R2)
+ ADD $8, R15, R15
+ ADD $8, R2, R2
+ SUB $8, R4, R4
+ B finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), R3
+ MOVB R3, (R7)
+ ADD $1, R15, R15
+ ADD $1, R7, R7
+ SUB $1, R4, R4
+ CBNZ R4, verySlowForwardCopy
+ B loop
+
+ // The code above handles copy tags.
+ // ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMP R10, R7
+ BNE errCorrupt
+
+ // return 0
+ MOVD $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVD $1, R2
+ MOVD R2, ret+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go
new file mode 100644
index 0000000000..7082b34919
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_asm.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Snappy-Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm +// +build amd64 arm64 + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000000..2f672be557 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!arm64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
+ // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000000..7f23657076 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. 
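A block-format round trip using Encode above together with Decode from decode.go; illustrative usage, not part of the diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("devtron"), 100) // repetitive, so it compresses well

	enc := snappy.Encode(nil, src) // nil dst: Encode allocates via MaxEncodedLen
	dec, err := snappy.Decode(nil, enc)

	fmt.Println(len(src), "->", len(enc), bytes.Equal(dec, src), err)
}
```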
+// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. 
+ wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
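Putting the stream-format Writer and Reader together; an illustrative sketch, not part of the diff. The buffered writer is used, so Close (which flushes) is what pushes the final chunk out:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/golang/snappy"
)

func main() {
	var stream bytes.Buffer

	w := snappy.NewBufferedWriter(&stream)
	if _, err := w.Write([]byte("hello, snappy stream")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // Close flushes the last chunk
		log.Fatal(err)
	}

	out, err := io.ReadAll(snappy.NewReader(&stream))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // hello, snappy stream
}
```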
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000000..adfd979fe2 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
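For reference while reading the asm, the same emission logic in Go. This mirrors the pure-Go emitCopy (the package's encode_other.go has the equivalent); note 0xfe is 63<<2|tagCopy2 and 0xee is 59<<2|tagCopy2:

```go
const (
	tagCopy1 = 0x01
	tagCopy2 = 0x02
)

func emitCopy(dst []byte, offset, length int) int {
	i := 0
	for length >= 68 {
		dst[i+0] = 63<<2 | tagCopy2 // 0xfe: a length-64 copy
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 64
	}
	if length > 64 {
		dst[i+0] = 59<<2 | tagCopy2 // 0xee: a length-60 copy
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		dst[i+0] = uint8(length-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		return i + 3
	}
	// tagCopy1: 3 offset bits in the tag's top bits, 3 length bits.
	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[i+1] = uint8(offset)
	return i + 2
}
```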
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
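The hashing scheme that encodeBlock implements can be sketched in Go, mirroring the pure-Go side of the package; tableParams is a hypothetical helper wrapping the calcShift logic below:

```go
// hash mirrors the multiplicative hash the asm computes with
// IMULL $0x1e35a7bd followed by SHRL CX (CX holds shift).
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// tableParams picks the hash-table size for one block, as in calcShift
// below: tableSize doubles (and shift shrinks) until the table covers
// the input or reaches 1<<14 entries of uint16, i.e. 32768 bytes.
func tableParams(srcLen int) (shift uint32, tableSize int) {
	shift, tableSize = 32-8, 1<<8
	for ; tableSize < 1<<14 && tableSize < srcLen; tableSize *= 2 {
		shift--
	}
	return shift, tableSize
}
```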
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 0000000000..f8d54adfc5 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. 
+ MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. +TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + +memclr: + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVD R8, 88(RSP)
+ CALL ·emitLiteral(SB)
+ MOVD 88(RSP), R8
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVD 56(RSP), R1
+ ADD R1, R8, R8
+
+encodeBlockEnd:
+ MOVD dst_base+0(FP), R3
+ SUB R3, R8, R8
+ MOVD R8, d+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go
new file mode 100644
index 0000000000..107c1e7141
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_asm.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+// +build amd64 arm64
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000000..296d7f0beb
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000000..ece692ea46 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. 
They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE new file mode 100644 index 0000000000..4a772d1ab3 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/LICENSE @@ -0,0 +1,24 @@ +Copyright 2012 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go new file mode 100644 index 0000000000..225920002d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go @@ -0,0 +1,349 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrBatchCorrupted records reason of batch corruption. This error will be +// wrapped with errors.ErrCorrupted. +type ErrBatchCorrupted struct { + Reason string +} + +func (e *ErrBatchCorrupted) Error() string { + return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) +} + +func newErrBatchCorrupted(reason string) error { + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) +} + +const ( + batchHeaderLen = 8 + 4 + batchGrowRec = 3000 + batchBufioSize = 16 +) + +// BatchReplay wraps basic batch operations. 
+type BatchReplay interface {
+ Put(key, value []byte)
+ Delete(key []byte)
+}
+
+type batchIndex struct {
+ keyType keyType
+ keyPos, keyLen int
+ valuePos, valueLen int
+}
+
+func (index batchIndex) k(data []byte) []byte {
+ return data[index.keyPos : index.keyPos+index.keyLen]
+}
+
+func (index batchIndex) v(data []byte) []byte {
+ if index.valueLen != 0 {
+ return data[index.valuePos : index.valuePos+index.valueLen]
+ }
+ return nil
+}
+
+func (index batchIndex) kv(data []byte) (key, value []byte) {
+ return index.k(data), index.v(data)
+}
+
+// Batch is a write batch.
+type Batch struct {
+ data []byte
+ index []batchIndex
+
+ // internalLen is the sum of key/value pair lengths plus an 8-byte internal key per record.
+ internalLen int
+}
+
+func (b *Batch) grow(n int) {
+ o := len(b.data)
+ if cap(b.data)-o < n {
+ div := 1
+ if len(b.index) > batchGrowRec {
+ div = len(b.index) / batchGrowRec
+ }
+ ndata := make([]byte, o, o+n+o/div)
+ copy(ndata, b.data)
+ b.data = ndata
+ }
+}
+
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
+ n := 1 + binary.MaxVarintLen32 + len(key)
+ if kt == keyTypeVal {
+ n += binary.MaxVarintLen32 + len(value)
+ }
+ b.grow(n)
+ index := batchIndex{keyType: kt}
+ o := len(b.data)
+ data := b.data[:o+n]
+ data[o] = byte(kt)
+ o++
+ o += binary.PutUvarint(data[o:], uint64(len(key)))
+ index.keyPos = o
+ index.keyLen = len(key)
+ o += copy(data[o:], key)
+ if kt == keyTypeVal {
+ o += binary.PutUvarint(data[o:], uint64(len(value)))
+ index.valuePos = o
+ index.valueLen = len(value)
+ o += copy(data[o:], value)
+ }
+ b.data = data[:o]
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+}
+
+// Put appends 'put operation' of the given key/value pair to the batch.
+// It is safe to modify the contents of the argument after Put returns but not
+// before.
+func (b *Batch) Put(key, value []byte) {
+ b.appendRec(keyTypeVal, key, value)
+}
+
+// Delete appends 'delete operation' of the given key to the batch.
+// It is safe to modify the contents of the argument after Delete returns but
+// not before.
+func (b *Batch) Delete(key []byte) {
+ b.appendRec(keyTypeDel, key, nil)
+}
+
+// Dump dumps batch contents. The returned slice can be loaded into the
+// batch using the Load method.
+// The returned slice is not its own copy, so the contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+ return b.data
+}
+
+// Load loads the given slice into the batch. Previous contents of the batch
+// will be discarded.
+// The given slice will not be copied and will be used as batch buffer, so
+// it is not safe to modify the contents of the slice.
+func (b *Batch) Load(data []byte) error {
+ return b.decode(data, -1)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+ for _, index := range b.index {
+ switch index.keyType {
+ case keyTypeVal:
+ r.Put(index.k(b.data), index.v(b.data))
+ case keyTypeDel:
+ r.Delete(index.k(b.data))
+ }
+ }
+ return nil
+}
+
+// Len returns the number of records in the batch.
+func (b *Batch) Len() int {
+ return len(b.index)
+}
+
+// Reset resets the batch. 
+func (b *Batch) Reset() { + b.data = b.data[:0] + b.index = b.index[:0] + b.internalLen = 0 +} + +func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error { + for i, index := range b.index { + if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil { + return err + } + } + return nil +} + +func (b *Batch) append(p *Batch) { + ob := len(b.data) + oi := len(b.index) + b.data = append(b.data, p.data...) + b.index = append(b.index, p.index...) + b.internalLen += p.internalLen + + // Updating index offset. + if ob != 0 { + for ; oi < len(b.index); oi++ { + index := &b.index[oi] + index.keyPos += ob + if index.valueLen != 0 { + index.valuePos += ob + } + } + } +} + +func (b *Batch) decode(data []byte, expectedLen int) error { + b.data = data + b.index = b.index[:0] + b.internalLen = 0 + err := decodeBatch(data, func(i int, index batchIndex) error { + b.index = append(b.index, index) + b.internalLen += index.keyLen + index.valueLen + 8 + return nil + }) + if err != nil { + return err + } + if expectedLen >= 0 && len(b.index) != expectedLen { + return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index))) + } + return nil +} + +func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error { + var ik []byte + for i, index := range b.index { + ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) + if err := mdb.Put(ik, index.v(b.data)); err != nil { + return err + } + } + return nil +} + +func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error { + var ik []byte + for i, index := range b.index { + ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) + if err := mdb.Delete(ik); err != nil { + return err + } + } + return nil +} + +func newBatch() interface{} { + return &Batch{} +} + +func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error { + var index batchIndex + for i, o := 0, 0; o < len(data); i++ { + // Key type. + index.keyType = keyType(data[o]) + if index.keyType > keyTypeVal { + return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType))) + } + o++ + + // Key. + x, n := binary.Uvarint(data[o:]) + o += n + if n <= 0 || o+int(x) > len(data) { + return newErrBatchCorrupted("bad record: invalid key length") + } + index.keyPos = o + index.keyLen = int(x) + o += index.keyLen + + // Value. 
+ if index.keyType == keyTypeVal {
+ x, n = binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid value length")
+ }
+ index.valuePos = o
+ index.valueLen = int(x)
+ o += index.valueLen
+ } else {
+ index.valuePos = 0
+ index.valueLen = 0
+ }
+
+ if err := fn(i, index); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
+ seq, batchLen, err = decodeBatchHeader(data)
+ if err != nil {
+ return 0, 0, err
+ }
+ if seq < expectSeq {
+ return 0, 0, newErrBatchCorrupted("invalid sequence number")
+ }
+ data = data[batchHeaderLen:]
+ var ik []byte
+ var decodedLen int
+ err = decodeBatch(data, func(i int, index batchIndex) error {
+ if i >= batchLen {
+ return newErrBatchCorrupted("invalid records length")
+ }
+ ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(data)); err != nil {
+ return err
+ }
+ decodedLen++
+ return nil
+ })
+ if err == nil && decodedLen != batchLen {
+ err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
+ }
+ return
+}
+
+func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
+ dst = ensureBuffer(dst, batchHeaderLen)
+ binary.LittleEndian.PutUint64(dst, seq)
+ binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
+ return dst
+}
+
+func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
+ if len(data) < batchHeaderLen {
+ return 0, 0, newErrBatchCorrupted("too short")
+ }
+
+ seq = binary.LittleEndian.Uint64(data)
+ batchLen = int(binary.LittleEndian.Uint32(data[8:]))
+ if batchLen < 0 {
+ return 0, 0, newErrBatchCorrupted("invalid records length")
+ }
+ return
+}
+
+func batchesLen(batches []*Batch) int {
+ batchLen := 0
+ for _, batch := range batches {
+ batchLen += batch.Len()
+ }
+ return batchLen
+}
+
+func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
+ if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
+ return err
+ }
+ for _, batch := range batches {
+ if _, err := wr.Write(batch.data); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 0000000000..c36ad32359
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,704 @@
+// Copyright (c) 2012, Suryandaru Triandana 
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides an interface and implementation of cache algorithms.
+package cache
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Cacher provides an interface to implement caching functionality.
+// An implementation must be safe for concurrent use.
+type Cacher interface {
+ // Capacity returns cache capacity.
+ Capacity() int
+
+ // SetCapacity sets cache capacity.
+ SetCapacity(capacity int)
+
+ // Promote promotes the 'cache node'.
+ Promote(n *Node)
+
+ // Ban evicts the 'cache node' and prevents subsequent 'promote'.
+ Ban(n *Node)
+
+ // Evict evicts the 'cache node'.
+ Evict(n *Node)
+
+ // EvictNS evicts 'cache node' with the given namespace.
+ EvictNS(ns uint64)
+
+ // EvictAll evicts all 'cache node'. 
+ EvictAll()
+
+ // Close closes the 'cache tree'.
+ Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if
+// so, the Release method will be called once the object is released.
+type Value interface{}
+
+// NamespaceGetter provides a convenient wrapper for namespace.
+type NamespaceGetter struct {
+ Cache *Cache
+ NS uint64
+}
+
+// Get simply calls the Cache.Get() method.
+func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+ return g.Cache.Get(g.NS, key, setFunc)
+}
+
+// The hash table implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
+
+const (
+ mInitialSize = 1 << 4
+ mOverflowThreshold = 1 << 5
+ mOverflowGrowThreshold = 1 << 7
+)
+
+type mBucket struct {
+ mu sync.Mutex
+ node []*Node
+ frozen bool
+}
+
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
+}
+
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ for _, n := range b.node {
+ if n.hash == hash && n.ns == ns && n.key == key {
+ atomic.AddInt32(&n.ref, 1)
+ b.mu.Unlock()
+ return true, false, n
+ }
+ }
+
+ // Get only.
+ if noset {
+ b.mu.Unlock()
+ return true, false, nil
+ }
+
+ // Create node.
+ n = &Node{
+ r: r,
+ hash: hash,
+ ns: ns,
+ key: key,
+ ref: 1,
+ }
+ // Add node to bucket.
+ b.node = append(b.node, n)
+ bLen := len(b.node)
+ b.mu.Unlock()
+
+ // Update counter.
+ grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+ if bLen > mOverflowThreshold {
+ grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+ }
+
+ // Grow.
+ if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) << 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+
+ return true, true, n
+}
+
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ var (
+ n *Node
+ bLen int
+ )
+ for i := range b.node {
+ n = b.node[i]
+ if n.ns == ns && n.key == key {
+ if atomic.LoadInt32(&n.ref) == 0 {
+ deleted = true
+
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Remove node from bucket.
+ b.node = append(b.node[:i], b.node[i+1:]...)
+ bLen = len(b.node)
+ }
+ break
+ }
+ }
+ b.mu.Unlock()
+
+ if deleted {
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+
+ // Update counter.
+ atomic.AddInt32(&r.size, int32(n.size)*-1)
+ shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+ if bLen >= mOverflowThreshold {
+ atomic.AddInt32(&h.overflow, -1)
+ }
+
+ // Shrink. 
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + nhLen := len(h.buckets) >> 1 + nh := &mNode{ + buckets: make([]unsafe.Pointer, nhLen), + mask: uint32(nhLen) - 1, + pred: unsafe.Pointer(h), + growThreshold: int32(nhLen * mOverflowThreshold), + shrinkThreshold: int32(nhLen >> 1), + } + ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) + if !ok { + panic("BUG: failed swapping head") + } + go nh.initBuckets() + } + } + + return true, deleted +} + +type mNode struct { + buckets []unsafe.Pointer // []*mBucket + mask uint32 + pred unsafe.Pointer // *mNode + resizeInProgess int32 + + overflow int32 + growThreshold int32 + shrinkThreshold int32 +} + +func (n *mNode) initBucket(i uint32) *mBucket { + if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { + return b + } + + p := (*mNode)(atomic.LoadPointer(&n.pred)) + if p != nil { + var node []*Node + if n.mask > p.mask { + // Grow. + pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) + if pb == nil { + pb = p.initBucket(i & p.mask) + } + m := pb.freeze() + // Split nodes. + for _, x := range m { + if x.hash&n.mask == i { + node = append(node, x) + } + } + } else { + // Shrink. + pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) + if pb0 == nil { + pb0 = p.initBucket(i) + } + pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) + if pb1 == nil { + pb1 = p.initBucket(i + uint32(len(n.buckets))) + } + m0 := pb0.freeze() + m1 := pb1.freeze() + // Merge nodes. + node = make([]*Node, 0, len(m0)+len(m1)) + node = append(node, m0...) + node = append(node, m1...) + } + b := &mBucket{node: node} + if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { + if len(node) > mOverflowThreshold { + atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) + } + return b + } + } + + return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) +} + +func (n *mNode) initBuckets() { + for i := range n.buckets { + n.initBucket(uint32(i)) + } + atomic.StorePointer(&n.pred, nil) +} + +// Cache is a 'cache map'. +type Cache struct { + mu sync.RWMutex + mHead unsafe.Pointer // *mNode + nodes int32 + size int32 + cacher Cacher + closed bool +} + +// NewCache creates a new 'cache map'. The cacher is optional and +// may be nil. +func NewCache(cacher Cacher) *Cache { + h := &mNode{ + buckets: make([]unsafe.Pointer, mInitialSize), + mask: mInitialSize - 1, + growThreshold: int32(mInitialSize * mOverflowThreshold), + shrinkThreshold: 0, + } + for i := range h.buckets { + h.buckets[i] = unsafe.Pointer(&mBucket{}) + } + r := &Cache{ + mHead: unsafe.Pointer(h), + cacher: cacher, + } + return r +} + +func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { + h := (*mNode)(atomic.LoadPointer(&r.mHead)) + i := hash & h.mask + b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) + if b == nil { + b = h.initBucket(i) + } + return h, b +} + +func (r *Cache) delete(n *Node) bool { + for { + h, b := r.getBucket(n.hash) + done, deleted := b.delete(r, h, n.hash, n.ns, n.key) + if done { + return deleted + } + } +} + +// Nodes returns number of 'cache node' in the map. +func (r *Cache) Nodes() int { + return int(atomic.LoadInt32(&r.nodes)) +} + +// Size returns sums of 'cache node' size in the map. +func (r *Cache) Size() int { + return int(atomic.LoadInt32(&r.size)) +} + +// Capacity returns cache capacity. 
+func (r *Cache) Capacity() int {
+ if r.cacher == nil {
+ return 0
+ }
+ return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+ if r.cacher != nil {
+ r.cacher.SetCapacity(capacity)
+ }
+}
+
+// Get gets the 'cache node' with the given namespace and key.
+// If the cache node is not found and setFunc is not nil, Get will atomically
+// create the 'cache node' by calling setFunc. Otherwise Get will return nil.
+//
+// The returned 'cache handle' should be released after use by calling the
+// Release method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return nil
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+ if done {
+ if n != nil {
+ n.mu.Lock()
+ if n.value == nil {
+ if setFunc == nil {
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+
+ n.size, n.value = setFunc()
+ if n.value == nil {
+ n.size = 0
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+ atomic.AddInt32(&r.size, int32(n.size))
+ }
+ n.mu.Unlock()
+ if r.cacher != nil {
+ r.cacher.Promote(n)
+ }
+ return &Handle{unsafe.Pointer(n)}
+ }
+
+ break
+ }
+ }
+ return nil
+}
+
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
+// applies only to the particular 'cache node', so when a 'cache node' is
+// recreated it will not be banned.
+//
+// If onDel is not nil, then it will be executed if such 'cache node'
+// doesn't exist or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if onDel != nil {
+ n.mu.Lock()
+ n.onDel = append(n.onDel, onDel)
+ n.mu.Unlock()
+ }
+ if r.cacher != nil {
+ r.cacher.Ban(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ if onDel != nil {
+ onDel()
+ }
+
+ return false
+}
+
+// Evict evicts the 'cache node' with the given namespace and key. This will
+// simply call Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if r.cacher != nil {
+ r.cacher.Evict(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ return false
+}
+
+// EvictNS evicts 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
+// Close closes the 'cache map' and forcefully releases all 'cache node'. 
+func (r *Cache) Close() error { + r.mu.Lock() + if !r.closed { + r.closed = true + + h := (*mNode)(r.mHead) + h.initBuckets() + + for i := range h.buckets { + b := (*mBucket)(h.buckets[i]) + for _, n := range b.node { + // Call releaser. + if n.value != nil { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + n.value = nil + } + + // Call OnDel. + for _, f := range n.onDel { + f() + } + n.onDel = nil + } + } + } + r.mu.Unlock() + + // Avoid deadlock. + if r.cacher != nil { + if err := r.cacher.Close(); err != nil { + return err + } + } + return nil +} + +// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but +// unlike Close it doesn't forcefully releases 'cache node'. +func (r *Cache) CloseWeak() error { + r.mu.Lock() + if !r.closed { + r.closed = true + } + r.mu.Unlock() + + // Avoid deadlock. + if r.cacher != nil { + r.cacher.EvictAll() + if err := r.cacher.Close(); err != nil { + return err + } + } + return nil +} + +// Node is a 'cache node'. +type Node struct { + r *Cache + + hash uint32 + ns, key uint64 + + mu sync.Mutex + size int + value Value + + ref int32 + onDel []func() + + CacheData unsafe.Pointer +} + +// NS returns this 'cache node' namespace. +func (n *Node) NS() uint64 { + return n.ns +} + +// Key returns this 'cache node' key. +func (n *Node) Key() uint64 { + return n.key +} + +// Size returns this 'cache node' size. +func (n *Node) Size() int { + return n.size +} + +// Value returns this 'cache node' value. +func (n *Node) Value() Value { + return n.value +} + +// Ref returns this 'cache node' ref counter. +func (n *Node) Ref() int32 { + return atomic.LoadInt32(&n.ref) +} + +// GetHandle returns an handle for this 'cache node'. +func (n *Node) GetHandle() *Handle { + if atomic.AddInt32(&n.ref, 1) <= 1 { + panic("BUG: Node.GetHandle on zero ref") + } + return &Handle{unsafe.Pointer(n)} +} + +func (n *Node) unref() { + if atomic.AddInt32(&n.ref, -1) == 0 { + n.r.delete(n) + } +} + +func (n *Node) unrefLocked() { + if atomic.AddInt32(&n.ref, -1) == 0 { + n.r.mu.RLock() + if !n.r.closed { + n.r.delete(n) + } + n.r.mu.RUnlock() + } +} + +// Handle is a 'cache handle' of a 'cache node'. +type Handle struct { + n unsafe.Pointer // *Node +} + +// Value returns the value of the 'cache node'. +func (h *Handle) Value() Value { + n := (*Node)(atomic.LoadPointer(&h.n)) + if n != nil { + return n.value + } + return nil +} + +// Release releases this 'cache handle'. +// It is safe to call release multiple times. +func (h *Handle) Release() { + nPtr := atomic.LoadPointer(&h.n) + if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { + n := (*Node)(nPtr) + n.unrefLocked() + } +} + +func murmur32(ns, key uint64, seed uint32) uint32 { + const ( + m = uint32(0x5bd1e995) + r = 24 + ) + + k1 := uint32(ns >> 32) + k2 := uint32(ns) + k3 := uint32(key >> 32) + k4 := uint32(key) + + k1 *= m + k1 ^= k1 >> r + k1 *= m + + k2 *= m + k2 ^= k2 >> r + k2 *= m + + k3 *= m + k3 ^= k3 >> r + k3 *= m + + k4 *= m + k4 ^= k4 >> r + k4 *= m + + h := seed + + h *= m + h ^= k1 + h *= m + h ^= k2 + h *= m + h ^= k3 + h *= m + h ^= k4 + + h ^= h >> 13 + h *= m + h ^= h >> 15 + + return h +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go new file mode 100644 index 0000000000..d9a84cde15 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go @@ -0,0 +1,195 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
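The lru.go file that follows keeps nodes on a circular doubly-linked list anchored by a sentinel (r.recent): recent.next is the most recently used entry, recent.prev the least. A reduced standalone sketch of that list discipline, with the same insert/remove shape:

package main

import "fmt"

type node struct {
	val        string
	next, prev *node
}

func (n *node) insert(at *node) {
	x := at.next
	at.next = n
	n.prev = at
	n.next = x
	x.prev = n
}

func (n *node) remove() {
	n.prev.next = n.next
	n.next.prev = n.prev
	n.prev, n.next = nil, nil
}

func main() {
	var sentinel node
	sentinel.next, sentinel.prev = &sentinel, &sentinel

	a := &node{val: "a"}
	b := &node{val: "b"}
	a.insert(&sentinel) // a is MRU
	b.insert(&sentinel) // b is MRU, a is LRU

	fmt.Println(sentinel.next.val, sentinel.prev.val) // b a
	sentinel.prev.remove()                            // evict the LRU entry (a)
	fmt.Println(sentinel.next.val, sentinel.prev.val) // b b
}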
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package cache + +import ( + "sync" + "unsafe" +) + +type lruNode struct { + n *Node + h *Handle + ban bool + + next, prev *lruNode +} + +func (n *lruNode) insert(at *lruNode) { + x := at.next + at.next = n + n.prev = at + n.next = x + x.prev = n +} + +func (n *lruNode) remove() { + if n.prev != nil { + n.prev.next = n.next + n.next.prev = n.prev + n.prev = nil + n.next = nil + } else { + panic("BUG: removing removed node") + } +} + +type lru struct { + mu sync.Mutex + capacity int + used int + recent lruNode +} + +func (r *lru) reset() { + r.recent.next = &r.recent + r.recent.prev = &r.recent + r.used = 0 +} + +func (r *lru) Capacity() int { + r.mu.Lock() + defer r.mu.Unlock() + return r.capacity +} + +func (r *lru) SetCapacity(capacity int) { + var evicted []*lruNode + + r.mu.Lock() + r.capacity = capacity + for r.used > r.capacity { + rn := r.recent.prev + if rn == nil { + panic("BUG: invalid LRU used or capacity counter") + } + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) Promote(n *Node) { + var evicted []*lruNode + + r.mu.Lock() + if n.CacheData == nil { + if n.Size() <= r.capacity { + rn := &lruNode{n: n, h: n.GetHandle()} + rn.insert(&r.recent) + n.CacheData = unsafe.Pointer(rn) + r.used += n.Size() + + for r.used > r.capacity { + rn := r.recent.prev + if rn == nil { + panic("BUG: invalid LRU used or capacity counter") + } + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + } + } else { + rn := (*lruNode)(n.CacheData) + if !rn.ban { + rn.remove() + rn.insert(&r.recent) + } + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) Ban(n *Node) { + r.mu.Lock() + if n.CacheData == nil { + n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) + } else { + rn := (*lruNode)(n.CacheData) + if !rn.ban { + rn.remove() + rn.ban = true + r.used -= rn.n.Size() + r.mu.Unlock() + + rn.h.Release() + rn.h = nil + return + } + } + r.mu.Unlock() +} + +func (r *lru) Evict(n *Node) { + r.mu.Lock() + rn := (*lruNode)(n.CacheData) + if rn == nil || rn.ban { + r.mu.Unlock() + return + } + n.CacheData = nil + r.mu.Unlock() + + rn.h.Release() +} + +func (r *lru) EvictNS(ns uint64) { + var evicted []*lruNode + + r.mu.Lock() + for e := r.recent.prev; e != &r.recent; { + rn := e + e = e.prev + if rn.n.NS() == ns { + rn.remove() + rn.n.CacheData = nil + r.used -= rn.n.Size() + evicted = append(evicted, rn) + } + } + r.mu.Unlock() + + for _, rn := range evicted { + rn.h.Release() + } +} + +func (r *lru) EvictAll() { + r.mu.Lock() + back := r.recent.prev + for rn := back; rn != &r.recent; rn = rn.prev { + rn.n.CacheData = nil + } + r.reset() + r.mu.Unlock() + + for rn := back; rn != &r.recent; rn = rn.prev { + rn.h.Release() + } +} + +func (r *lru) Close() error { + return nil +} + +// NewLRU create a new LRU-cache. +func NewLRU(capacity int) Cacher { + r := &lru{capacity: capacity} + r.reset() + return r +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go new file mode 100644 index 0000000000..448402b826 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
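Putting the two files together, a hedged sketch of the expected eviction behavior when the LRU above backs the cache map (capacity and node sizes share whatever unit the caller passes; the concrete numbers and outcome are assumptions based on the code above):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(2)) // capacity 2

	for k := uint64(0); k < 3; k++ {
		h := c.Get(0, k, func() (int, cache.Value) { return 1, k }) // size 1 each
		h.Release()
	}

	// Promoting the third node pushes used above capacity, so the LRU
	// releases its handle on the least recently used node (key 0) and
	// that node drops out of the map.
	fmt.Println(c.Nodes(), c.Size()) // expected: 2 2
	c.Close()
}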
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/comparer" +) + +type iComparer struct { + ucmp comparer.Comparer +} + +func (icmp *iComparer) uName() string { + return icmp.ucmp.Name() +} + +func (icmp *iComparer) uCompare(a, b []byte) int { + return icmp.ucmp.Compare(a, b) +} + +func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { + return icmp.ucmp.Separator(dst, a, b) +} + +func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { + return icmp.ucmp.Successor(dst, b) +} + +func (icmp *iComparer) Name() string { + return icmp.uName() +} + +func (icmp *iComparer) Compare(a, b []byte) int { + x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey()) + if x == 0 { + if m, n := internalKey(a).num(), internalKey(b).num(); m > n { + return -1 + } else if m < n { + return 1 + } + } + return x +} + +func (icmp *iComparer) Separator(dst, a, b []byte) []byte { + ua, ub := internalKey(a).ukey(), internalKey(b).ukey() + dst = icmp.uSeparator(dst, ua, ub) + if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { + // Append earliest possible number. + return append(dst, keyMaxNumBytes...) + } + return nil +} + +func (icmp *iComparer) Successor(dst, b []byte) []byte { + ub := internalKey(b).ukey() + dst = icmp.uSuccessor(dst, ub) + if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { + // Append earliest possible number. + return append(dst, keyMaxNumBytes...) + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go new file mode 100644 index 0000000000..abf9fb65c7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go @@ -0,0 +1,51 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package comparer + +import "bytes" + +type bytesComparer struct{} + +func (bytesComparer) Compare(a, b []byte) int { + return bytes.Compare(a, b) +} + +func (bytesComparer) Name() string { + return "leveldb.BytewiseComparator" +} + +func (bytesComparer) Separator(dst, a, b []byte) []byte { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for ; i < n && a[i] == b[i]; i++ { + } + if i >= n { + // Do not shorten if one string is a prefix of the other + } else if c := a[i]; c < 0xff && c+1 < b[i] { + dst = append(dst, a[:i+1]...) + dst[len(dst)-1]++ + return dst + } + return nil +} + +func (bytesComparer) Successor(dst, b []byte) []byte { + for i, c := range b { + if c != 0xff { + dst = append(dst, b[:i+1]...) + dst[len(dst)-1]++ + return dst + } + } + return nil +} + +// DefaultComparer are default implementation of the Comparer interface. +// It uses the natural ordering, consistent with bytes.Compare. +var DefaultComparer = bytesComparer{} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go new file mode 100644 index 0000000000..2c522db23b --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go @@ -0,0 +1,57 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+// Package comparer provides interfaces and implementations for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+	// Compare returns -1, 0, or +1 depending on whether a is 'less than',
+	// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+	// if their contents are exactly equal. Furthermore, the empty slice
+	// must be 'less than' any non-empty slice.
+	Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+	BasicComparer
+
+	// Name returns the name of the comparer.
+	//
+	// The Level-DB on-disk format stores the comparer name, and opening a
+	// database with a different comparer from the one it was created with
+	// will result in an error.
+	//
+	// An implementation should switch to a new name whenever the comparer
+	// implementation changes in a way that will cause the relative ordering
+	// of any two keys to change.
+	//
+	// Names starting with "leveldb." are reserved and should not be used
+	// by any users of this package.
+	Name() string
+
+	// Below are advanced functions used to reduce the space requirements
+	// for internal data structures such as index blocks.
+
+	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+	// where 'less than' is consistent with Compare. An implementation should
+	// return nil if x is equal to a.
+	//
+	// The contents of a and b must not be modified in any way. Doing so
+	// may corrupt the internal state.
+	Separator(dst, a, b []byte) []byte
+
+	// Successor appends a sequence of bytes x to dst such that x >= b, where
+	// 'less than' is consistent with Compare. An implementation should return
+	// nil if x is equal to b.
+	//
+	// The contents of b must not be modified in any way. Doing so may
+	// corrupt the internal state.
+	Successor(dst, b []byte) []byte
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 0000000000..90fedf7bd7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1179 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"container/list"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/journal"
+	"github.com/syndtr/goleveldb/leveldb/memdb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/syndtr/goleveldb/leveldb/table"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// DB is a LevelDB database.
+type DB struct {
+	// Need 64-bit alignment.
+	seq uint64
+
+	// Stats. Need 64-bit alignment.
+	cWriteDelay            int64 // The cumulative duration of write delays
+	cWriteDelayN           int32 // The cumulative number of write delays
+	inWritePaused          int32 // The indicator whether write operation is paused by compaction
+	aliveSnaps, aliveIters int32
+
+	// Session.
+	s *session
+
+	// MemDB.
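Returning to the Comparer interface defined in comparer/comparer.go above, a sketch of what a user-supplied implementation could look like (hypothetical type; returning nil from Separator and Successor is the documented way to decline key shortening):

package main

import "bytes"

// reverseComparer orders keys in descending byte order.
type reverseComparer struct{}

func (reverseComparer) Compare(a, b []byte) int { return -bytes.Compare(a, b) }

// Name is persisted in the manifest; changing the ordering means
// changing the name.
func (reverseComparer) Name() string { return "example.ReverseComparator" }

// Declining to shorten keys is always legal: nil tells the caller to
// keep the original key.
func (reverseComparer) Separator(dst, a, b []byte) []byte { return nil }
func (reverseComparer) Successor(dst, b []byte) []byte    { return nil }

func main() {
	println(reverseComparer{}.Compare([]byte("a"), []byte("b"))) // 1
}

Such a comparer would be wired in through the Comparer field of opt.Options when opening the DB.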
+ memMu sync.RWMutex + memPool chan *memdb.DB + mem, frozenMem *memDB + journal *journal.Writer + journalWriter storage.Writer + journalFd storage.FileDesc + frozenJournalFd storage.FileDesc + frozenSeq uint64 + + // Snapshot. + snapsMu sync.Mutex + snapsList *list.List + + // Write. + batchPool sync.Pool + writeMergeC chan writeMerge + writeMergedC chan bool + writeLockC chan struct{} + writeAckC chan error + writeDelay time.Duration + writeDelayN int + tr *Transaction + + // Compaction. + compCommitLk sync.Mutex + tcompCmdC chan cCmd + tcompPauseC chan chan<- struct{} + mcompCmdC chan cCmd + compErrC chan error + compPerErrC chan error + compErrSetC chan error + compWriteLocking bool + compStats cStats + memdbMaxLevel int // For testing. + + // Close. + closeW sync.WaitGroup + closeC chan struct{} + closed uint32 + closer io.Closer +} + +func openDB(s *session) (*DB, error) { + s.log("db@open opening") + start := time.Now() + db := &DB{ + s: s, + // Initial sequence + seq: s.stSeqNum, + // MemDB + memPool: make(chan *memdb.DB, 1), + // Snapshot + snapsList: list.New(), + // Write + batchPool: sync.Pool{New: newBatch}, + writeMergeC: make(chan writeMerge), + writeMergedC: make(chan bool), + writeLockC: make(chan struct{}, 1), + writeAckC: make(chan error), + // Compaction + tcompCmdC: make(chan cCmd), + tcompPauseC: make(chan chan<- struct{}), + mcompCmdC: make(chan cCmd), + compErrC: make(chan error), + compPerErrC: make(chan error), + compErrSetC: make(chan error), + // Close + closeC: make(chan struct{}), + } + + // Read-only mode. + readOnly := s.o.GetReadOnly() + + if readOnly { + // Recover journals (read-only mode). + if err := db.recoverJournalRO(); err != nil { + return nil, err + } + } else { + // Recover journals. + if err := db.recoverJournal(); err != nil { + return nil, err + } + + // Remove any obsolete files. + if err := db.checkAndCleanFiles(); err != nil { + // Close journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return nil, err + } + + } + + // Doesn't need to be included in the wait group. + go db.compactionError() + go db.mpoolDrain() + + if readOnly { + db.SetReadOnly() + } else { + db.closeW.Add(2) + go db.tCompaction() + go db.mCompaction() + // go db.jWriter() + } + + s.logf("db@open done T·%v", time.Since(start)) + + runtime.SetFinalizer(db, (*DB).Close) + return db, nil +} + +// Open opens or creates a DB for the given storage. +// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist Open will returns +// os.ErrExist error. +// +// Open will return an error with type of ErrCorrupted if corruption +// detected in the DB. Use errors.IsCorrupted to test whether an error is +// due to corruption. Corrupted DB can be recovered with Recover function. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = s.recover() + if err != nil { + if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() { + return + } + err = s.create() + if err != nil { + return + } + } else if s.o.GetErrorIfExist() { + err = os.ErrExist + return + } + + return openDB(s) +} + +// OpenFile opens or creates a DB for the given path. 
+// The DB will be created if not exist, unless ErrorIfMissing is true. +// Also, if ErrorIfExist is true and the DB exist OpenFile will returns +// os.ErrExist error. +// +// OpenFile uses standard file-system backed storage implementation as +// described in the leveldb/storage package. +// +// OpenFile will return an error with type of ErrCorrupted if corruption +// detected in the DB. Use errors.IsCorrupted to test whether an error is +// due to corruption. Corrupted DB can be recovered with Recover function. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func OpenFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path, o.GetReadOnly()) + if err != nil { + return + } + db, err = Open(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +// Recover recovers and opens a DB with missing or corrupted manifest files +// for the given storage. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { + s, err := newSession(stor, o) + if err != nil { + return + } + defer func() { + if err != nil { + s.close() + s.release() + } + }() + + err = recoverTable(s, o) + if err != nil { + return + } + return openDB(s) +} + +// RecoverFile recovers and opens a DB with missing or corrupted manifest files +// for the given path. It will ignore any manifest files, valid or not. +// The DB must already exist or it will returns an error. +// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. +// +// RecoverFile uses standard file-system backed storage implementation as described +// in the leveldb/storage package. +// +// The returned DB instance is safe for concurrent use. +// The DB must be closed after use, by calling Close method. +func RecoverFile(path string, o *opt.Options) (db *DB, err error) { + stor, err := storage.OpenFile(path, false) + if err != nil { + return + } + db, err = Recover(stor, o) + if err != nil { + stor.Close() + } else { + db.closer = stor + } + return +} + +func recoverTable(s *session, o *opt.Options) error { + o = dupOptions(o) + // Mask StrictReader, lets StrictRecovery doing its job. + o.Strict &= ^opt.StrictReader + + // Get all tables and sort it by file number. + fds, err := s.stor.List(storage.TypeTable) + if err != nil { + return err + } + sortFds(fds) + + var ( + maxSeq uint64 + recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int + + // We will drop corrupted table. + strict = o.GetStrict(opt.StrictRecovery) + noSync = o.GetNoSync() + + rec = &sessionRecord{} + bpool = util.NewBufferPool(o.GetBlockSize() + 5) + ) + buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) { + tmpFd = s.newTemp() + writer, err := s.stor.Create(tmpFd) + if err != nil { + return + } + defer func() { + writer.Close() + if err != nil { + s.stor.Remove(tmpFd) + tmpFd = storage.FileDesc{} + } + }() + + // Copy entries. 
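Typical call sites for the open/recover functions above; a sketch in which the path is a placeholder:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
)

func main() {
	// Opens the DB at the given path, creating it if it does not exist.
	db, err := leveldb.OpenFile("/tmp/exampledb", nil)
	if errors.IsCorrupted(err) {
		// Manifest or table corruption: rebuild from whatever tables
		// survive, as recoverTable above does.
		db, err = leveldb.RecoverFile("/tmp/exampledb", nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}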
+ tw := table.NewWriter(writer, o) + for iter.Next() { + key := iter.Key() + if validInternalKey(key) { + err = tw.Append(key, iter.Value()) + if err != nil { + return + } + } + } + err = iter.Error() + if err != nil && !errors.IsCorrupted(err) { + return + } + err = tw.Close() + if err != nil { + return + } + if !noSync { + err = writer.Sync() + if err != nil { + return + } + } + size = int64(tw.BytesLen()) + return + } + recoverTable := func(fd storage.FileDesc) error { + s.logf("table@recovery recovering @%d", fd.Num) + reader, err := s.stor.Open(fd) + if err != nil { + return err + } + var closed bool + defer func() { + if !closed { + reader.Close() + } + }() + + // Get file size. + size, err := reader.Seek(0, 2) + if err != nil { + return err + } + + var ( + tSeq uint64 + tgoodKey, tcorruptedKey, tcorruptedBlock int + imin, imax []byte + ) + tr, err := table.NewReader(reader, size, fd, nil, bpool, o) + if err != nil { + return err + } + iter := tr.NewIterator(nil, nil) + if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { + itererr.SetErrorCallback(func(err error) { + if errors.IsCorrupted(err) { + s.logf("table@recovery block corruption @%d %q", fd.Num, err) + tcorruptedBlock++ + } + }) + } + + // Scan the table. + for iter.Next() { + key := iter.Key() + _, seq, _, kerr := parseInternalKey(key) + if kerr != nil { + tcorruptedKey++ + continue + } + tgoodKey++ + if seq > tSeq { + tSeq = seq + } + if imin == nil { + imin = append([]byte{}, key...) + } + imax = append(imax[:0], key...) + } + if err := iter.Error(); err != nil && !errors.IsCorrupted(err) { + iter.Release() + return err + } + iter.Release() + + goodKey += tgoodKey + corruptedKey += tcorruptedKey + corruptedBlock += tcorruptedBlock + + if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { + droppedTable++ + s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + return nil + } + + if tgoodKey > 0 { + if tcorruptedKey > 0 || tcorruptedBlock > 0 { + // Rebuild the table. + s.logf("table@recovery rebuilding @%d", fd.Num) + iter := tr.NewIterator(nil, nil) + tmpFd, newSize, err := buildTable(iter) + iter.Release() + if err != nil { + return err + } + closed = true + reader.Close() + if err := s.stor.Rename(tmpFd, fd); err != nil { + return err + } + size = newSize + } + if tSeq > maxSeq { + maxSeq = tSeq + } + recoveredKey += tgoodKey + // Add table to level 0. + rec.addTable(0, fd.Num, size, imin, imax) + s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) + } else { + droppedTable++ + s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size) + } + + return nil + } + + // Recover all tables. + if len(fds) > 0 { + s.logf("table@recovery F·%d", len(fds)) + + // Mark file number as used. + s.markFileNum(fds[len(fds)-1].Num) + + for _, fd := range fds { + if err := recoverTable(fd); err != nil { + return err + } + } + + s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq) + } + + // Set sequence number. + rec.setSeqNum(maxSeq) + + // Create new manifest. + if err := s.create(); err != nil { + return err + } + + // Commit. + return s.commit(rec) +} + +func (db *DB) recoverJournal() error { + // Get all journals and sort it by file number. 
+ rawFds, err := db.s.stor.List(storage.TypeJournal) + if err != nil { + return err + } + sortFds(rawFds) + + // Journals that will be recovered. + var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) + } + } + + var ( + ofd storage.FileDesc // Obsolete file. + rec = &sessionRecord{} + ) + + // Recover journals. + if len(fds) > 0 { + db.logf("journal@recovery F·%d", len(fds)) + + // Mark file number as used. + db.s.markFileNum(fds[len(fds)-1].Num) + + var ( + // Options. + strict = db.s.o.GetStrict(opt.StrictJournal) + checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer = db.s.o.GetWriteBuffer() + + jr *journal.Reader + mdb = memdb.New(db.s.icmp, writeBuffer) + buf = &util.Buffer{} + batchSeq uint64 + batchLen int + ) + + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) + + fr, err := db.s.stor.Open(fd) + if err != nil { + return err + } + + // Create or reset journal reader instance. + if jr == nil { + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) + } else { + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + } + + // Flush memdb and remove obsolete journal file. + if !ofd.Zero() { + if mdb.Len() > 0 { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + fr.Close() + return err + } + } + + rec.setJournalNum(fd.Num) + rec.setSeqNum(db.seq) + if err := db.s.commit(rec); err != nil { + fr.Close() + return err + } + rec.resetAddedTables() + + db.s.stor.Remove(ofd) + ofd = storage.FileDesc{} + } + + // Replay journal to memdb. + mdb.Reset() + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + + fr.Close() + return errors.SetFd(err, fd) + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + // This is error returned due to corruption, with strict == false. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) + if err != nil { + if !strict && errors.IsCorrupted(err) { + db.s.logf("journal error: %v (skipped)", err) + // We won't apply sequence number as it might be corrupted. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + + // Save sequence number. + db.seq = batchSeq + uint64(batchLen) + + // Flush it if large enough. + if mdb.Size() >= writeBuffer { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + fr.Close() + return err + } + + mdb.Reset() + } + } + + fr.Close() + ofd = fd + } + + // Flush the last memdb. + if mdb.Len() > 0 { + if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil { + return err + } + } + } + + // Create a new journal. + if _, err := db.newMem(0); err != nil { + return err + } + + // Commit. + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.seq) + if err := db.s.commit(rec); err != nil { + // Close journal on error. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + } + return err + } + + // Remove the last obsolete journal file. + if !ofd.Zero() { + db.s.stor.Remove(ofd) + } + + return nil +} + +func (db *DB) recoverJournalRO() error { + // Get all journals and sort it by file number. + rawFds, err := db.s.stor.List(storage.TypeJournal) + if err != nil { + return err + } + sortFds(rawFds) + + // Journals that will be recovered. 
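The replay loop above drives the journal package's Reader. A self-contained sketch of writing one record and reading it back; the in-memory buffer and payload are assumptions, and a real journal record would carry an encoded write batch:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

func main() {
	var buf bytes.Buffer

	w := journal.NewWriter(&buf)
	rec, _ := w.Next()          // start a new record
	rec.Write([]byte("batch1")) // payload
	w.Close()                   // flush the pending block

	// nil dropper: only consulted on corruption, which clean data avoids.
	r := journal.NewReader(&buf, nil, true /* strict */, true /* checksum */)
	for {
		rr, err := r.Next()
		if err == io.EOF {
			break
		}
		payload, _ := io.ReadAll(rr)
		fmt.Printf("%s\n", payload)
	}
}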
+ var fds []storage.FileDesc + for _, fd := range rawFds { + if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum { + fds = append(fds, fd) + } + } + + var ( + // Options. + strict = db.s.o.GetStrict(opt.StrictJournal) + checksum = db.s.o.GetStrict(opt.StrictJournalChecksum) + writeBuffer = db.s.o.GetWriteBuffer() + + mdb = memdb.New(db.s.icmp, writeBuffer) + ) + + // Recover journals. + if len(fds) > 0 { + db.logf("journal@recovery RO·Mode F·%d", len(fds)) + + var ( + jr *journal.Reader + buf = &util.Buffer{} + batchSeq uint64 + batchLen int + ) + + for _, fd := range fds { + db.logf("journal@recovery recovering @%d", fd.Num) + + fr, err := db.s.stor.Open(fd) + if err != nil { + return err + } + + // Create or reset journal reader instance. + if jr == nil { + jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) + } else { + jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + } + + // Replay journal to memdb. + for { + r, err := jr.Next() + if err != nil { + if err == io.EOF { + break + } + + fr.Close() + return errors.SetFd(err, fd) + } + + buf.Reset() + if _, err := buf.ReadFrom(r); err != nil { + if err == io.ErrUnexpectedEOF { + // This is error returned due to corruption, with strict == false. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb) + if err != nil { + if !strict && errors.IsCorrupted(err) { + db.s.logf("journal error: %v (skipped)", err) + // We won't apply sequence number as it might be corrupted. + continue + } + + fr.Close() + return errors.SetFd(err, fd) + } + + // Save sequence number. + db.seq = batchSeq + uint64(batchLen) + } + + fr.Close() + } + } + + // Set memDB. + db.mem = &memDB{db: db, DB: mdb, ref: 1} + + return nil +} + +func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) { + mk, mv, err := mdb.Find(ikey) + if err == nil { + ukey, _, kt, kerr := parseInternalKey(mk) + if kerr != nil { + // Shouldn't have had happen. + panic(kerr) + } + if icmp.uCompare(ukey, ikey.ukey()) == 0 { + if kt == keyTypeDel { + return true, nil, ErrNotFound + } + return true, mv, nil + + } + } else if err != ErrNotFound { + return true, nil, err + } + return +} + +func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { + ikey := makeInternalKey(nil, key, seq, keyTypeSeek) + + if auxm != nil { + if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } + + em, fm := db.getMems() + for _, m := range [...]*memDB{em, fm} { + if m == nil { + continue + } + defer m.decref() + + if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { + return append([]byte{}, mv...), me + } + } + + v := db.s.version() + value, cSched, err := v.get(auxt, ikey, ro, false) + v.release() + if cSched { + // Trigger table compaction. 
+		db.compTrigger(db.tcompCmdC)
+	}
+	return
+}
+
+func nilIfNotFound(err error) error {
+	if err == ErrNotFound {
+		return nil
+	}
+	return err
+}
+
+func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
+
+	if auxm != nil {
+		if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
+			return me == nil, nilIfNotFound(me)
+		}
+	}
+
+	em, fm := db.getMems()
+	for _, m := range [...]*memDB{em, fm} {
+		if m == nil {
+			continue
+		}
+		defer m.decref()
+
+		if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
+			return me == nil, nilIfNotFound(me)
+		}
+	}
+
+	v := db.s.version()
+	_, cSched, err := v.get(auxt, ikey, ro, true)
+	v.release()
+	if cSched {
+		// Trigger table compaction.
+		db.compTrigger(db.tcompCmdC)
+	}
+	if err == nil {
+		ret = true
+	} else if err == ErrNotFound {
+		err = nil
+	}
+	return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify its contents.
+// It is also safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.get(nil, nil, key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.has(nil, nil, key, se.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently while modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The contents of any slice returned by the iterator (e.g. a slice
+// returned by calling the Iterator.Key() or Iterator.Value() methods) should
+// not be modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling the Release method.
+//
+// Also read the Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	// Iterator holds the 'version' lock; 'version' is immutable so the
+	// snapshot can be released after the iterator is created.
+	return db.newIterator(nil, nil, se.seq, slice, ro)
+}
+
+// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
+// is a frozen view of the DB state at a particular point in time. The
+// contents of a snapshot are guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling the Release method.
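A usage sketch of Get and Has as documented above; the path and keys are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/exampledb", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		log.Fatal(err)
	}

	val, err := db.Get([]byte("k"), nil)
	switch err {
	case nil:
		fmt.Printf("k=%s\n", val) // val is a private copy, safe to modify
	case leveldb.ErrNotFound:
		fmt.Println("absent")
	default:
		log.Fatal(err)
	}

	ok, _ := db.Has([]byte("missing"), nil)
	fmt.Println(ok) // false
}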
+func (db *DB) GetSnapshot() (*Snapshot, error) { + if err := db.ok(); err != nil { + return nil, err + } + + return db.newSnapshot(), nil +} + +// GetProperty returns value of the given property name. +// +// Property names: +// leveldb.num-files-at-level{n} +// Returns the number of files at level 'n'. +// leveldb.stats +// Returns statistics of the underlying DB. +// leveldb.iostats +// Returns statistics of effective disk read and write. +// leveldb.writedelay +// Returns cumulative write delay caused by compaction. +// leveldb.sstables +// Returns sstables list for each level. +// leveldb.blockpool +// Returns block pool stats. +// leveldb.cachedblock +// Returns size of cached block. +// leveldb.openedtables +// Returns number of opened tables. +// leveldb.alivesnaps +// Returns number of alive snapshots. +// leveldb.aliveiters +// Returns number of alive iterators. +func (db *DB) GetProperty(name string) (value string, err error) { + err = db.ok() + if err != nil { + return + } + + const prefix = "leveldb." + if !strings.HasPrefix(name, prefix) { + return "", ErrNotFound + } + p := name[len(prefix):] + + v := db.s.version() + defer v.release() + + numFilesPrefix := "num-files-at-level" + switch { + case strings.HasPrefix(p, numFilesPrefix): + var level uint + var rest string + n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) + if n != 1 { + err = ErrNotFound + } else { + value = fmt.Sprint(v.tLen(int(level))) + } + case p == "stats": + value = "Compactions\n" + + " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + + "-------+------------+---------------+---------------+---------------+---------------\n" + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", + level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), + float64(read)/1048576.0, float64(write)/1048576.0) + } + case p == "iostats": + value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f", + float64(db.s.stor.reads())/1048576.0, + float64(db.s.stor.writes())/1048576.0) + case p == "writedelay": + writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + paused := atomic.LoadInt32(&db.inWritePaused) == 1 + value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused) + case p == "sstables": + for level, tables := range v.levels { + value += fmt.Sprintf("--- level %d ---\n", level) + for _, t := range tables { + value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax) + } + } + case p == "blockpool": + value = fmt.Sprintf("%v", db.s.tops.bpool) + case p == "cachedblock": + if db.s.tops.bcache != nil { + value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) + } else { + value = "" + } + case p == "openedtables": + value = fmt.Sprintf("%d", db.s.tops.cache.Size()) + case p == "alivesnaps": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) + case p == "aliveiters": + value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) + default: + err = ErrNotFound + } + + return +} + +// DBStats is database statistics. 
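A brief sketch exercising GetProperty with a few of the property names listed above (path is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

// printProps dumps a few of the properties documented on GetProperty.
func printProps(db *leveldb.DB) {
	for _, name := range []string{
		"leveldb.stats",
		"leveldb.num-files-at-level0",
		"leveldb.aliveiters",
	} {
		v, err := db.GetProperty(name)
		if err != nil {
			log.Printf("%s: %v", name, err)
			continue
		}
		fmt.Printf("%s:\n%s\n", name, v)
	}
}

func main() {
	db, err := leveldb.OpenFile("/tmp/exampledb", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	printProps(db)
}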
+type DBStats struct { + WriteDelayCount int32 + WriteDelayDuration time.Duration + WritePaused bool + + AliveSnapshots int32 + AliveIterators int32 + + IOWrite uint64 + IORead uint64 + + BlockCacheSize int + OpenedTablesCount int + + LevelSizes []int64 + LevelTablesCounts []int + LevelRead []int64 + LevelWrite []int64 + LevelDurations []time.Duration +} + +// Stats populates s with database statistics. +func (db *DB) Stats(s *DBStats) error { + err := db.ok() + if err != nil { + return err + } + + s.IORead = db.s.stor.reads() + s.IOWrite = db.s.stor.writes() + s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN) + s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) + s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 + + s.OpenedTablesCount = db.s.tops.cache.Size() + if db.s.tops.bcache != nil { + s.BlockCacheSize = db.s.tops.bcache.Size() + } else { + s.BlockCacheSize = 0 + } + + s.AliveIterators = atomic.LoadInt32(&db.aliveIters) + s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) + + s.LevelDurations = s.LevelDurations[:0] + s.LevelRead = s.LevelRead[:0] + s.LevelWrite = s.LevelWrite[:0] + s.LevelSizes = s.LevelSizes[:0] + s.LevelTablesCounts = s.LevelTablesCounts[:0] + + v := db.s.version() + defer v.release() + + for level, tables := range v.levels { + duration, read, write := db.compStats.getStat(level) + if len(tables) == 0 && duration == 0 { + continue + } + s.LevelDurations = append(s.LevelDurations, duration) + s.LevelRead = append(s.LevelRead, read) + s.LevelWrite = append(s.LevelWrite, write) + s.LevelSizes = append(s.LevelSizes, tables.size()) + s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables)) + } + + return nil +} + +// SizeOf calculates approximate sizes of the given key ranges. +// The length of the returned sizes are equal with the length of the given +// ranges. The returned sizes measure storage space usage, so if the user +// data compresses by a factor of ten, the returned sizes will be one-tenth +// the size of the corresponding user data size. +// The results may not include the sizes of recently written data. +func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { + if err := db.ok(); err != nil { + return nil, err + } + + v := db.s.version() + defer v.release() + + sizes := make(Sizes, 0, len(ranges)) + for _, r := range ranges { + imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek) + imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek) + start, err := v.offsetOf(imin) + if err != nil { + return nil, err + } + limit, err := v.offsetOf(imax) + if err != nil { + return nil, err + } + var size int64 + if limit >= start { + size = limit - start + } + sizes = append(sizes, size) + } + + return sizes, nil +} + +// Close closes the DB. This will also releases any outstanding snapshot, +// abort any in-flight compaction and discard open transaction. +// +// It is not safe to close a DB until all outstanding iterators are released. +// It is valid to call Close multiple times. Other methods should not be +// called after the DB has been closed. +func (db *DB) Close() error { + if !db.setClosed() { + return ErrClosed + } + + start := time.Now() + db.log("db@close closing") + + // Clear the finalizer. + runtime.SetFinalizer(db, nil) + + // Get compaction error. + var err error + select { + case err = <-db.compErrC: + if err == ErrReadOnly { + err = nil + } + default: + } + + // Signal all goroutines. + close(db.closeC) + + // Discard open transaction. 
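Stats is the structured counterpart of GetProperty, and SizeOf approximates on-disk usage for key ranges; a sketch (path and range are placeholders, and Sizes.Sum is assumed to be defined elsewhere in the package):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/exampledb", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Populate a DBStats value instead of parsing property strings.
	var s leveldb.DBStats
	if err := db.Stats(&s); err != nil {
		log.Fatal(err)
	}
	fmt.Println("write delays:", s.WriteDelayCount, "opened tables:", s.OpenedTablesCount)

	// Approximate on-disk size of a key range (excludes recent writes).
	sizes, err := db.SizeOf([]util.Range{{Start: []byte("a"), Limit: []byte("z")}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("approx bytes:", sizes.Sum())
}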
+ if db.tr != nil { + db.tr.Discard() + } + + // Acquire writer lock. + db.writeLockC <- struct{}{} + + // Wait for all gorotines to exit. + db.closeW.Wait() + + // Closes journal. + if db.journal != nil { + db.journal.Close() + db.journalWriter.Close() + db.journal = nil + db.journalWriter = nil + } + + if db.writeDelayN > 0 { + db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + } + + // Close session. + db.s.close() + db.logf("db@close done T·%v", time.Since(start)) + db.s.release() + + if db.closer != nil { + if err1 := db.closer.Close(); err == nil { + err = err1 + } + db.closer = nil + } + + // Clear memdbs. + db.clearMems() + + return err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go new file mode 100644 index 0000000000..0c1b9a53b8 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -0,0 +1,854 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +var ( + errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") +) + +type cStat struct { + duration time.Duration + read int64 + write int64 +} + +func (p *cStat) add(n *cStatStaging) { + p.duration += n.duration + p.read += n.read + p.write += n.write +} + +func (p *cStat) get() (duration time.Duration, read, write int64) { + return p.duration, p.read, p.write +} + +type cStatStaging struct { + start time.Time + duration time.Duration + on bool + read int64 + write int64 +} + +func (p *cStatStaging) startTimer() { + if !p.on { + p.start = time.Now() + p.on = true + } +} + +func (p *cStatStaging) stopTimer() { + if p.on { + p.duration += time.Since(p.start) + p.on = false + } +} + +type cStats struct { + lk sync.Mutex + stats []cStat +} + +func (p *cStats) addStat(level int, n *cStatStaging) { + p.lk.Lock() + if level >= len(p.stats) { + newStats := make([]cStat, level+1) + copy(newStats, p.stats) + p.stats = newStats + } + p.stats[level].add(n) + p.lk.Unlock() +} + +func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) { + p.lk.Lock() + defer p.lk.Unlock() + if level < len(p.stats) { + return p.stats[level].get() + } + return +} + +func (db *DB) compactionError() { + var err error +noerr: + // No error. + for { + select { + case err = <-db.compErrSetC: + switch { + case err == nil: + case err == ErrReadOnly, errors.IsCorrupted(err): + goto hasperr + default: + goto haserr + } + case <-db.closeC: + return + } + } +haserr: + // Transient error. + for { + select { + case db.compErrC <- err: + case err = <-db.compErrSetC: + switch { + case err == nil: + goto noerr + case err == ErrReadOnly, errors.IsCorrupted(err): + goto hasperr + default: + } + case <-db.closeC: + return + } + } +hasperr: + // Persistent error. + for { + select { + case db.compErrC <- err: + case db.compPerErrC <- err: + case db.writeLockC <- struct{}{}: + // Hold write lock, so that write won't pass-through. + db.compWriteLocking = true + case <-db.closeC: + if db.compWriteLocking { + // We should release the lock or Close will hang. 
+ <-db.writeLockC + } + return + } + } +} + +type compactionTransactCounter int + +func (cnt *compactionTransactCounter) incr() { + *cnt++ +} + +type compactionTransactInterface interface { + run(cnt *compactionTransactCounter) error + revert() error +} + +func (db *DB) compactionTransact(name string, t compactionTransactInterface) { + defer func() { + if x := recover(); x != nil { + if x == errCompactionTransactExiting { + if err := t.revert(); err != nil { + db.logf("%s revert error %q", name, err) + } + } + panic(x) + } + }() + + const ( + backoffMin = 1 * time.Second + backoffMax = 8 * time.Second + backoffMul = 2 * time.Second + ) + var ( + backoff = backoffMin + backoffT = time.NewTimer(backoff) + lastCnt = compactionTransactCounter(0) + + disableBackoff = db.s.o.GetDisableCompactionBackoff() + ) + for n := 0; ; n++ { + // Check whether the DB is closed. + if db.isClosed() { + db.logf("%s exiting", name) + db.compactionExitTransact() + } else if n > 0 { + db.logf("%s retrying N·%d", name, n) + } + + // Execute. + cnt := compactionTransactCounter(0) + err := t.run(&cnt) + if err != nil { + db.logf("%s error I·%d %q", name, cnt, err) + } + + // Set compaction error status. + select { + case db.compErrSetC <- err: + case perr := <-db.compPerErrC: + if err != nil { + db.logf("%s exiting (persistent error %q)", name, perr) + db.compactionExitTransact() + } + case <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + if err == nil { + return + } + if errors.IsCorrupted(err) { + db.logf("%s exiting (corruption detected)", name) + db.compactionExitTransact() + } + + if !disableBackoff { + // Reset backoff duration if counter is advancing. + if cnt > lastCnt { + backoff = backoffMin + lastCnt = cnt + } + + // Backoff. + backoffT.Reset(backoff) + if backoff < backoffMax { + backoff *= backoffMul + if backoff > backoffMax { + backoff = backoffMax + } + } + select { + case <-backoffT.C: + case <-db.closeC: + db.logf("%s exiting", name) + db.compactionExitTransact() + } + } + } +} + +type compactionTransactFunc struct { + runFunc func(cnt *compactionTransactCounter) error + revertFunc func() error +} + +func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { + return t.runFunc(cnt) +} + +func (t *compactionTransactFunc) revert() error { + if t.revertFunc != nil { + return t.revertFunc() + } + return nil +} + +func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { + db.compactionTransact(name, &compactionTransactFunc{run, revert}) +} + +func (db *DB) compactionExitTransact() { + panic(errCompactionTransactExiting) +} + +func (db *DB) compactionCommit(name string, rec *sessionRecord) { + db.compCommitLk.Lock() + defer db.compCommitLk.Unlock() // Defer is necessary. + db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error { + return db.s.commit(rec) + }, nil) +} + +func (db *DB) memCompaction() { + mdb := db.getFrozenMem() + if mdb == nil { + return + } + defer mdb.decref() + + db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) + + // Don't compact empty memdb. + if mdb.Len() == 0 { + db.logf("memdb@flush skipping") + // drop frozen memdb + db.dropFrozenMem() + return + } + + // Pause table compaction. 
+ resumeC := make(chan struct{}) + select { + case db.tcompPauseC <- (chan<- struct{})(resumeC): + case <-db.compPerErrC: + close(resumeC) + resumeC = nil + case <-db.closeC: + db.compactionExitTransact() + } + + var ( + rec = &sessionRecord{} + stats = &cStatStaging{} + flushLevel int + ) + + // Generate tables. + db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) { + stats.startTimer() + flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel) + stats.stopTimer() + return + }, func() error { + for _, r := range rec.addedTables { + db.logf("memdb@flush revert @%d", r.num) + if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil { + return err + } + } + return nil + }) + + rec.setJournalNum(db.journalFd.Num) + rec.setSeqNum(db.frozenSeq) + + // Commit. + stats.startTimer() + db.compactionCommit("memdb", rec) + stats.stopTimer() + + db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration) + + for _, r := range rec.addedTables { + stats.write += r.size + } + db.compStats.addStat(flushLevel, stats) + + // Drop frozen memdb. + db.dropFrozenMem() + + // Resume table compaction. + if resumeC != nil { + select { + case <-resumeC: + close(resumeC) + case <-db.closeC: + db.compactionExitTransact() + } + } + + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) +} + +type tableCompactionBuilder struct { + db *DB + s *session + c *compaction + rec *sessionRecord + stat0, stat1 *cStatStaging + + snapHasLastUkey bool + snapLastUkey []byte + snapLastSeq uint64 + snapIter int + snapKerrCnt int + snapDropCnt int + + kerrCnt int + dropCnt int + + minSeq uint64 + strict bool + tableSize int + + tw *tWriter +} + +func (b *tableCompactionBuilder) appendKV(key, value []byte) error { + // Create new table if not already. + if b.tw == nil { + // Check for pause event. + if b.db != nil { + select { + case ch := <-b.db.tcompPauseC: + b.db.pauseCompaction(ch) + case <-b.db.closeC: + b.db.compactionExitTransact() + default: + } + } + + // Create new table. + var err error + b.tw, err = b.s.tops.create() + if err != nil { + return err + } + } + + // Write key/value into table. + return b.tw.append(key, value) +} + +func (b *tableCompactionBuilder) needFlush() bool { + return b.tw.tw.BytesLen() >= b.tableSize +} + +func (b *tableCompactionBuilder) flush() error { + t, err := b.tw.finish() + if err != nil { + return err + } + b.rec.addTableFile(b.c.sourceLevel+1, t) + b.stat1.write += t.size + b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + b.tw = nil + return nil +} + +func (b *tableCompactionBuilder) cleanup() { + if b.tw != nil { + b.tw.drop() + b.tw = nil + } +} + +func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { + snapResumed := b.snapIter > 0 + hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. + lastUkey := append([]byte{}, b.snapLastUkey...) + lastSeq := b.snapLastSeq + b.kerrCnt = b.snapKerrCnt + b.dropCnt = b.snapDropCnt + // Restore compaction state. + b.c.restore() + + defer b.cleanup() + + b.stat1.startTimer() + defer b.stat1.stopTimer() + + iter := b.c.newIterator() + defer iter.Release() + for i := 0; iter.Next(); i++ { + // Incr transact counter. + cnt.incr() + + // Skip until last state. 
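The tcompPauseC handshake used by memCompaction above works by blocking the table-compaction goroutine on a send: pauseCompaction parks on ch <- struct{}{} until the memdb flush finishes and receives from resumeC, so the blocked send is the pause and the receive is the resume. A reduced standalone sketch of that pattern:

package main

import "fmt"

func worker(pauseC <-chan chan<- struct{}, done <-chan struct{}) {
	for {
		select {
		case ch := <-pauseC:
			// The send blocks until the controller receives: this is the pause.
			ch <- struct{}{}
		case <-done:
			return
		}
	}
}

func main() {
	pauseC := make(chan chan<- struct{})
	done := make(chan struct{})
	go worker(pauseC, done)

	resume := make(chan struct{})
	pauseC <- resume // worker will now park on its send into resume
	fmt.Println("worker paused")
	<-resume // receiving unblocks the worker: resume
	close(done)
}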
+ if i < b.snapIter { + continue + } + + resumed := false + if snapResumed { + resumed = true + snapResumed = false + } + + ikey := iter.Key() + ukey, seq, kt, kerr := parseInternalKey(ikey) + + if kerr == nil { + shouldStop := !resumed && b.c.shouldStopBefore(ikey) + + if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { + // First occurrence of this user key. + + // Only rotate tables if ukey doesn't hop across. + if b.tw != nil && (shouldStop || b.needFlush()) { + if err := b.flush(); err != nil { + return err + } + + // Creates snapshot of the state. + b.c.save() + b.snapHasLastUkey = hasLastUkey + b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) + b.snapLastSeq = lastSeq + b.snapIter = i + b.snapKerrCnt = b.kerrCnt + b.snapDropCnt = b.dropCnt + } + + hasLastUkey = true + lastUkey = append(lastUkey[:0], ukey...) + lastSeq = keyMaxSeq + } + + switch { + case lastSeq <= b.minSeq: + // Dropped because newer entry for same user key exist + fallthrough // (A) + case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): + // For this user key: + // (1) there is no data in higher levels + // (2) data in lower levels will have larger seq numbers + // (3) data in layers that are being compacted here and have + // smaller seq numbers will be dropped in the next + // few iterations of this loop (by rule (A) above). + // Therefore this deletion marker is obsolete and can be dropped. + lastSeq = seq + b.dropCnt++ + continue + default: + lastSeq = seq + } + } else { + if b.strict { + return kerr + } + + // Don't drop corrupted keys. + hasLastUkey = false + lastUkey = lastUkey[:0] + lastSeq = keyMaxSeq + b.kerrCnt++ + } + + if err := b.appendKV(ikey, iter.Value()); err != nil { + return err + } + } + + if err := iter.Error(); err != nil { + return err + } + + // Finish last table. + if b.tw != nil && !b.tw.empty() { + return b.flush() + } + return nil +} + +func (b *tableCompactionBuilder) revert() error { + for _, at := range b.rec.addedTables { + b.s.logf("table@build revert @%d", at.num) + if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { + return err + } + } + return nil +} + +func (db *DB) tableCompaction(c *compaction, noTrivial bool) { + defer c.release() + + rec := &sessionRecord{} + rec.addCompPtr(c.sourceLevel, c.imax) + + if !noTrivial && c.trivial() { + t := c.levels[0][0] + db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1) + rec.delTable(c.sourceLevel, t.fd.Num) + rec.addTableFile(c.sourceLevel+1, t) + db.compactionCommit("table-move", rec) + return + } + + var stats [2]cStatStaging + for i, tables := range c.levels { + for _, t := range tables { + stats[i].read += t.size + // Insert deleted tables into record + rec.delTable(c.sourceLevel+i, t.fd.Num) + } + } + sourceSize := int(stats[0].read + stats[1].read) + minSeq := db.minSeq() + db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) + + b := &tableCompactionBuilder{ + db: db, + s: db.s, + c: c, + rec: rec, + stat1: &stats[1], + minSeq: minSeq, + strict: db.s.o.GetStrict(opt.StrictCompaction), + tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1), + } + db.compactionTransact("table@build", b) + + // Commit. 
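The drop rules above compare sequence numbers parsed out of each internal key. For orientation, a sketch of the 8-byte trailer layout that parseInternalKey (defined in key.go, not shown here) decodes; the exact encoding is an assumption based on the upstream sources:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		keyTypeDel = 0 // assumed: deletion marker
		keyTypeVal = 1 // assumed: regular value
	)
	ukey := []byte("user-key")
	seq := uint64(1234)

	// internal key = ukey | little-endian uint64((seq << 8) | keyType)
	ikey := append([]byte{}, ukey...)
	var trailer [8]byte
	binary.LittleEndian.PutUint64(trailer[:], seq<<8|keyTypeVal)
	ikey = append(ikey, trailer[:]...)

	num := binary.LittleEndian.Uint64(ikey[len(ikey)-8:])
	fmt.Println(num>>8, num&0xff) // 1234 1
}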
+ stats[1].startTimer() + db.compactionCommit("table", rec) + stats[1].stopTimer() + + resultSize := int(stats[1].write) + db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) + + // Save compaction stats + for i := range stats { + db.compStats.addStat(c.sourceLevel+1, &stats[i]) + } +} + +func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error { + db.logf("table@compaction range L%d %q:%q", level, umin, umax) + if level >= 0 { + if c := db.s.getCompactionRange(level, umin, umax, true); c != nil { + db.tableCompaction(c, true) + } + } else { + // Retry until nothing to compact. + for { + compacted := false + + // Scan for maximum level with overlapped tables. + v := db.s.version() + m := 1 + for i := m; i < len(v.levels); i++ { + tables := v.levels[i] + if tables.overlaps(db.s.icmp, umin, umax, false) { + m = i + } + } + v.release() + + for level := 0; level < m; level++ { + if c := db.s.getCompactionRange(level, umin, umax, false); c != nil { + db.tableCompaction(c, true) + compacted = true + } + } + + if !compacted { + break + } + } + } + + return nil +} + +func (db *DB) tableAutoCompaction() { + if c := db.s.pickCompaction(); c != nil { + db.tableCompaction(c, false) + } +} + +func (db *DB) tableNeedCompaction() bool { + v := db.s.version() + defer v.release() + return v.needCompaction() +} + +// resumeWrite returns an indicator whether we should resume write operation if enough level0 files are compacted. +func (db *DB) resumeWrite() bool { + v := db.s.version() + defer v.release() + if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { + return true + } + return false +} + +func (db *DB) pauseCompaction(ch chan<- struct{}) { + select { + case ch <- struct{}{}: + case <-db.closeC: + db.compactionExitTransact() + } +} + +type cCmd interface { + ack(err error) +} + +type cAuto struct { + // Note for table compaction, an non-empty ackC represents it's a compaction waiting command. + ackC chan<- error +} + +func (r cAuto) ack(err error) { + if r.ackC != nil { + defer func() { + recover() + }() + r.ackC <- err + } +} + +type cRange struct { + level int + min, max []byte + ackC chan<- error +} + +func (r cRange) ack(err error) { + if r.ackC != nil { + defer func() { + recover() + }() + r.ackC <- err + } +} + +// This will trigger auto compaction but will not wait for it. +func (db *DB) compTrigger(compC chan<- cCmd) { + select { + case compC <- cAuto{}: + default: + } +} + +// This will trigger auto compaction and/or wait for all compaction to be done. +func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cAuto{ch}: + case err = <-db.compErrC: + return + case <-db.closeC: + return ErrClosed + } + // Wait cmd. + select { + case err = <-ch: + case err = <-db.compErrC: + case <-db.closeC: + return ErrClosed + } + return err +} + +// Send range compaction request. +func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { + ch := make(chan error) + defer close(ch) + // Send cmd. + select { + case compC <- cRange{level, min, max, ch}: + case err := <-db.compErrC: + return err + case <-db.closeC: + return ErrClosed + } + // Wait cmd. 
+ select { + case err = <-ch: + case err = <-db.compErrC: + case <-db.closeC: + return ErrClosed + } + return err +} + +func (db *DB) mCompaction() { + var x cCmd + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + select { + case x = <-db.mcompCmdC: + switch x.(type) { + case cAuto: + db.memCompaction() + x.ack(nil) + x = nil + default: + panic("leveldb: unknown command") + } + case <-db.closeC: + return + } + } +} + +func (db *DB) tCompaction() { + var ( + x cCmd + waitQ []cCmd + ) + + defer func() { + if x := recover(); x != nil { + if x != errCompactionTransactExiting { + panic(x) + } + } + for i := range waitQ { + waitQ[i].ack(ErrClosed) + waitQ[i] = nil + } + if x != nil { + x.ack(ErrClosed) + } + db.closeW.Done() + }() + + for { + if db.tableNeedCompaction() { + select { + case x = <-db.tcompCmdC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case <-db.closeC: + return + default: + } + // Resume write operation as soon as possible. + if len(waitQ) > 0 && db.resumeWrite() { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + } + } else { + for i := range waitQ { + waitQ[i].ack(nil) + waitQ[i] = nil + } + waitQ = waitQ[:0] + select { + case x = <-db.tcompCmdC: + case ch := <-db.tcompPauseC: + db.pauseCompaction(ch) + continue + case <-db.closeC: + return + } + } + if x != nil { + switch cmd := x.(type) { + case cAuto: + if cmd.ackC != nil { + // Check the write pause state before caching it. + if db.resumeWrite() { + x.ack(nil) + } else { + waitQ = append(waitQ, x) + } + } + case cRange: + x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)) + default: + panic("leveldb: unknown command") + } + x = nil + } + db.tableAutoCompaction() + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go new file mode 100644 index 0000000000..03c24cdab5 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -0,0 +1,360 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
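+
+// Editor's aside: newRawIterator below is the heart of this file. It stacks
+// every live data source (auxiliary memdb, auxiliary tables, effective and
+// frozen memdbs, then the per-level table iterators) into one merged iterator
+// ordered by the internal-key comparer. The shape, reduced to a sketch
+// (hypothetical helper, not this file's API):
+//
+//	func mergedView(sources []iterator.Iterator, cmp comparer.Comparer) iterator.Iterator {
+//		// NewMergedIterator yields keys from all sources in cmp order; with
+//		// strict=false a corrupted source degrades the scan instead of
+//		// failing it.
+//		return iterator.NewMergedIterator(sources, cmp, false)
+//	}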
+
+package leveldb
+
+import (
+	"errors"
+	"math/rand"
+	"runtime"
+	"sync"
+	"sync/atomic"
+
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
+)
+
+type memdbReleaser struct {
+	once sync.Once
+	m    *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+	mr.once.Do(func() {
+		mr.m.decref()
+	})
+}
+
+func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+	em, fm := db.getMems()
+	v := db.s.version()
+
+	tableIts := v.getIterators(slice, ro)
+	n := len(tableIts) + len(auxt) + 3
+	its := make([]iterator.Iterator, 0, n)
+
+	if auxm != nil {
+		ami := auxm.NewIterator(slice)
+		ami.SetReleaser(&memdbReleaser{m: auxm})
+		its = append(its, ami)
+	}
+	for _, t := range auxt {
+		its = append(its, v.s.tops.newIterator(t, slice, ro))
+	}
+
+	emi := em.NewIterator(slice)
+	emi.SetReleaser(&memdbReleaser{m: em})
+	its = append(its, emi)
+	if fm != nil {
+		fmi := fm.NewIterator(slice)
+		fmi.SetReleaser(&memdbReleaser{m: fm})
+		its = append(its, fmi)
+	}
+	its = append(its, tableIts...)
+	mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
+	mi.SetReleaser(&versionReleaser{v: v})
+	return mi
+}
+
+func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+	var islice *util.Range
+	if slice != nil {
+		islice = &util.Range{}
+		if slice.Start != nil {
+			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
+		}
+		if slice.Limit != nil {
+			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
+		}
+	}
+	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
+	iter := &dbIter{
+		db:     db,
+		icmp:   db.s.icmp,
+		iter:   rawIter,
+		seq:    seq,
+		strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
+		key:    make([]byte, 0),
+		value:  make([]byte, 0),
+	}
+	atomic.AddInt32(&db.aliveIters, 1)
+	runtime.SetFinalizer(iter, (*dbIter).Release)
+	return iter
+}
+
+func (db *DB) iterSamplingRate() int {
+	return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
+type dir int
+
+const (
+	dirReleased dir = iota - 1
+	dirSOI
+	dirEOI
+	dirBackward
+	dirForward
+)
+
+// dbIter represents an iterator's state over a database session.
+type dbIter struct { + db *DB + icmp *iComparer + iter iterator.Iterator + seq uint64 + strict bool + + smaplingGap int + dir dir + key []byte + value []byte + err error + releaser util.Releaser +} + +func (i *dbIter) sampleSeek() { + ikey := i.iter.Key() + i.smaplingGap -= len(ikey) + len(i.iter.Value()) + for i.smaplingGap < 0 { + i.smaplingGap += i.db.iterSamplingRate() + i.db.sampleSeek(ikey) + } +} + +func (i *dbIter) setErr(err error) { + i.err = err + i.key = nil + i.value = nil +} + +func (i *dbIter) iterErr() { + if err := i.iter.Error(); err != nil { + i.setErr(err) + } +} + +func (i *dbIter) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *dbIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.First() { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.iter.Last() { + return i.prev() + } + i.dir = dirSOI + i.iterErr() + return false +} + +func (i *dbIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek) + if i.iter.Seek(ikey) { + i.dir = dirSOI + return i.next() + } + i.dir = dirEOI + i.iterErr() + return false +} + +func (i *dbIter) next() bool { + for { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if seq <= i.seq { + switch kt { + case keyTypeDel: + // Skip deleted key. + i.key = append(i.key[:0], ukey...) + i.dir = dirForward + case keyTypeVal: + if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) + i.dir = dirForward + return true + } + } + } + } else if i.strict { + i.setErr(kerr) + break + } + if !i.iter.Next() { + i.dir = dirEOI + i.iterErr() + break + } + } + return false +} + +func (i *dbIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { + i.dir = dirEOI + i.iterErr() + return false + } + return i.next() +} + +func (i *dbIter) prev() bool { + i.dir = dirBackward + del := true + if i.iter.Valid() { + for { + if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if seq <= i.seq { + if !del && i.icmp.uCompare(ukey, i.key) < 0 { + return true + } + del = (kt == keyTypeDel) + if !del { + i.key = append(i.key[:0], ukey...) + i.value = append(i.value[:0], i.iter.Value()...) 
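+
+					// Editor's aside: next and prev above implement snapshot
+					// visibility: versions of a user key are ordered newest
+					// first, the first one with seq <= i.seq decides, and a
+					// deletion marker hides everything older. Sketch
+					// (illustrative, not part of the upstream file):
+					//
+					//	type version struct {
+					//		seq   uint64
+					//		del   bool
+					//		value string
+					//	}
+					//
+					//	// read resolves a key at snapshot sequence s from
+					//	// versions sorted by seq descending.
+					//	func read(vs []version, s uint64) (string, bool) {
+					//		for _, v := range vs {
+					//			if v.seq > s {
+					//				continue // written after the snapshot
+					//			}
+					//			if v.del {
+					//				return "", false // deleted as of s
+					//			}
+					//			return v.value, true
+					//		}
+					//		return "", false // key did not exist yet
+					//	}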
+ } + } + } else if i.strict { + i.setErr(kerr) + return false + } + if !i.iter.Prev() { + break + } + } + } + if del { + i.dir = dirSOI + i.iterErr() + return false + } + return true +} + +func (i *dbIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + for i.iter.Prev() { + if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil { + i.sampleSeek() + if i.icmp.uCompare(ukey, i.key) < 0 { + goto cont + } + } else if i.strict { + i.setErr(kerr) + return false + } + } + i.dir = dirSOI + i.iterErr() + return false + } + +cont: + return i.prev() +} + +func (i *dbIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *dbIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *dbIter) Release() { + if i.dir != dirReleased { + // Clear the finalizer. + runtime.SetFinalizer(i, nil) + + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + + i.dir = dirReleased + i.key = nil + i.value = nil + i.iter.Release() + i.iter = nil + atomic.AddInt32(&i.db.aliveIters, -1) + i.db = nil + } +} + +func (i *dbIter) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *dbIter) Error() error { + return i.err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go new file mode 100644 index 0000000000..c2ad70c847 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go @@ -0,0 +1,187 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "container/list" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type snapshotElement struct { + seq uint64 + ref int + e *list.Element +} + +// Acquires a snapshot, based on latest sequence. +func (db *DB) acquireSnapshot() *snapshotElement { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + seq := db.getSeq() + + if e := db.snapsList.Back(); e != nil { + se := e.Value.(*snapshotElement) + if se.seq == seq { + se.ref++ + return se + } else if seq < se.seq { + panic("leveldb: sequence number is not increasing") + } + } + se := &snapshotElement{seq: seq, ref: 1} + se.e = db.snapsList.PushBack(se) + return se +} + +// Releases given snapshot element. +func (db *DB) releaseSnapshot(se *snapshotElement) { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + se.ref-- + if se.ref == 0 { + db.snapsList.Remove(se.e) + se.e = nil + } else if se.ref < 0 { + panic("leveldb: Snapshot: negative element reference") + } +} + +// Gets minimum sequence that not being snapshotted. +func (db *DB) minSeq() uint64 { + db.snapsMu.Lock() + defer db.snapsMu.Unlock() + + if e := db.snapsList.Front(); e != nil { + return e.Value.(*snapshotElement).seq + } + + return db.getSeq() +} + +// Snapshot is a DB snapshot. +type Snapshot struct { + db *DB + elem *snapshotElement + mu sync.RWMutex + released bool +} + +// Creates new snapshot object. 
+func (db *DB) newSnapshot() *Snapshot {
+	snap := &Snapshot{
+		db:   db,
+		elem: db.acquireSnapshot(),
+	}
+	atomic.AddInt32(&db.aliveSnaps, 1)
+	runtime.SetFinalizer(snap, (*Snapshot).Release)
+	return snap
+}
+
+func (snap *Snapshot) String() string {
+	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.get(nil, nil, key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.has(nil, nil, key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The contents of any slice returned by the iterator (e.g. the
+// slices returned by the Iterator.Key() or Iterator.Value() methods) should
+// not be modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+// Releasing the snapshot doesn't release the iterator too; the
+// iterator would still be valid until released.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := snap.db.ok(); err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+	if snap.released {
+		return iterator.NewEmptyIterator(ErrSnapshotReleased)
+	}
+	// Since the iterator already holds a version ref, it doesn't need to
+	// hold a snapshot ref.
+	return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators; the iterators would still be valid until released or the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+
+	if !snap.released {
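+
+		// Editor's aside: releasing is the inverse of newSnapshot above: each
+		// sequence number has one shared, reference-counted snapshotElement,
+		// and the front of the list is what minSeq() reports to the compactor
+		// as the oldest sequence still visible. A minimal sketch of that
+		// bookkeeping (illustrative, not this file's API):
+		//
+		//	// refs maps seq -> number of live snapshots at that seq.
+		//	func oldestVisible(refs map[uint64]int, cur uint64) uint64 {
+		//		min := cur
+		//		for seq, n := range refs {
+		//			if n > 0 && seq < min {
+		//				min = seq
+		//			}
+		//		}
+		//		return min
+		//	}
+
+		// Clear the finalizer.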
+ runtime.SetFinalizer(snap, nil) + + snap.released = true + snap.db.releaseSnapshot(snap.elem) + atomic.AddInt32(&snap.db.aliveSnaps, -1) + snap.db = nil + snap.elem = nil + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go new file mode 100644 index 0000000000..65e1c54bb4 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go @@ -0,0 +1,239 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +var ( + errHasFrozenMem = errors.New("has frozen mem") +) + +type memDB struct { + db *DB + *memdb.DB + ref int32 +} + +func (m *memDB) getref() int32 { + return atomic.LoadInt32(&m.ref) +} + +func (m *memDB) incref() { + atomic.AddInt32(&m.ref, 1) +} + +func (m *memDB) decref() { + if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { + // Only put back memdb with std capacity. + if m.Capacity() == m.db.s.o.GetWriteBuffer() { + m.Reset() + m.db.mpoolPut(m.DB) + } + m.db = nil + m.DB = nil + } else if ref < 0 { + panic("negative memdb ref") + } +} + +// Get latest sequence number. +func (db *DB) getSeq() uint64 { + return atomic.LoadUint64(&db.seq) +} + +// Atomically adds delta to seq. +func (db *DB) addSeq(delta uint64) { + atomic.AddUint64(&db.seq, delta) +} + +func (db *DB) setSeq(seq uint64) { + atomic.StoreUint64(&db.seq, seq) +} + +func (db *DB) sampleSeek(ikey internalKey) { + v := db.s.version() + if v.sampleSeek(ikey) { + // Trigger table compaction. + db.compTrigger(db.tcompCmdC) + } + v.release() +} + +func (db *DB) mpoolPut(mem *memdb.DB) { + if !db.isClosed() { + select { + case db.memPool <- mem: + default: + } + } +} + +func (db *DB) mpoolGet(n int) *memDB { + var mdb *memdb.DB + select { + case mdb = <-db.memPool: + default: + } + if mdb == nil || mdb.Capacity() < n { + mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) + } + return &memDB{ + db: db, + DB: mdb, + } +} + +func (db *DB) mpoolDrain() { + ticker := time.NewTicker(30 * time.Second) + for { + select { + case <-ticker.C: + select { + case <-db.memPool: + default: + } + case <-db.closeC: + ticker.Stop() + // Make sure the pool is drained. + select { + case <-db.memPool: + case <-time.After(time.Second): + } + close(db.memPool) + return + } + } +} + +// Create new memdb and froze the old one; need external synchronization. +// newMem only called synchronously by the writer. +func (db *DB) newMem(n int) (mem *memDB, err error) { + fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()} + w, err := db.s.stor.Create(fd) + if err != nil { + db.s.reuseFileNum(fd.Num) + return + } + + db.memMu.Lock() + defer db.memMu.Unlock() + + if db.frozenMem != nil { + return nil, errHasFrozenMem + } + + if db.journal == nil { + db.journal = journal.NewWriter(w) + } else { + db.journal.Reset(w) + db.journalWriter.Close() + db.frozenJournalFd = db.journalFd + } + db.journalWriter = w + db.journalFd = fd + db.frozenMem = db.mem + mem = db.mpoolGet(n) + mem.incref() // for self + mem.incref() // for caller + db.mem = mem + // The seq only incremented by the writer. And whoever called newMem + // should hold write lock, so no need additional synchronization here. 
+ db.frozenSeq = db.seq + return +} + +// Get all memdbs. +func (db *DB) getMems() (e, f *memDB) { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem != nil { + db.mem.incref() + } else if !db.isClosed() { + panic("nil effective mem") + } + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.mem, db.frozenMem +} + +// Get effective memdb. +func (db *DB) getEffectiveMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.mem != nil { + db.mem.incref() + } else if !db.isClosed() { + panic("nil effective mem") + } + return db.mem +} + +// Check whether we has frozen memdb. +func (db *DB) hasFrozenMem() bool { + db.memMu.RLock() + defer db.memMu.RUnlock() + return db.frozenMem != nil +} + +// Get frozen memdb. +func (db *DB) getFrozenMem() *memDB { + db.memMu.RLock() + defer db.memMu.RUnlock() + if db.frozenMem != nil { + db.frozenMem.incref() + } + return db.frozenMem +} + +// Drop frozen memdb; assume that frozen memdb isn't nil. +func (db *DB) dropFrozenMem() { + db.memMu.Lock() + if err := db.s.stor.Remove(db.frozenJournalFd); err != nil { + db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err) + } else { + db.logf("journal@remove removed @%d", db.frozenJournalFd.Num) + } + db.frozenJournalFd = storage.FileDesc{} + db.frozenMem.decref() + db.frozenMem = nil + db.memMu.Unlock() +} + +// Clear mems ptr; used by DB.Close(). +func (db *DB) clearMems() { + db.memMu.Lock() + db.mem = nil + db.frozenMem = nil + db.memMu.Unlock() +} + +// Set closed flag; return true if not already closed. +func (db *DB) setClosed() bool { + return atomic.CompareAndSwapUint32(&db.closed, 0, 1) +} + +// Check whether DB was closed. +func (db *DB) isClosed() bool { + return atomic.LoadUint32(&db.closed) != 0 +} + +// Check read ok status. +func (db *DB) ok() error { + if db.isClosed() { + return ErrClosed + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go new file mode 100644 index 0000000000..1a00001882 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go @@ -0,0 +1,329 @@ +// Copyright (c) 2016, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "errors" + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var errTransactionDone = errors.New("leveldb: transaction already closed") + +// Transaction is the transaction handle. +type Transaction struct { + db *DB + lk sync.RWMutex + seq uint64 + mem *memDB + tables tFiles + ikScratch []byte + rec sessionRecord + stats cStatStaging + closed bool +} + +// Get gets the value for the given key. It returns ErrNotFound if the +// DB does not contains the key. +// +// The returned slice is its own copy, it is safe to modify the contents +// of the returned slice. +// It is safe to modify the contents of the argument after Get returns. +func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) { + tr.lk.RLock() + defer tr.lk.RUnlock() + if tr.closed { + return nil, errTransactionDone + } + return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro) +} + +// Has returns true if the DB does contains the given key. +// +// It is safe to modify the contents of the argument after Has returns. 
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return false, errTransactionDone
+	}
+	return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the transaction.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently while writing to the
+// transaction. The resultant key/value pairs are guaranteed to be consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The contents of any slice returned by the iterator (e.g. the
+// slices returned by the Iterator.Key() or Iterator.Value() methods) should
+// not be modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	tr.lk.RLock()
+	defer tr.lk.RUnlock()
+	if tr.closed {
+		return iterator.NewEmptyIterator(errTransactionDone)
+	}
+	tr.mem.incref()
+	return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
+}
+
+func (tr *Transaction) flush() error {
+	// Flush memdb.
+	if tr.mem.Len() != 0 {
+		tr.stats.startTimer()
+		iter := tr.mem.NewIterator(nil)
+		t, n, err := tr.db.s.tops.createFrom(iter)
+		iter.Release()
+		tr.stats.stopTimer()
+		if err != nil {
+			return err
+		}
+		if tr.mem.getref() == 1 {
+			tr.mem.Reset()
+		} else {
+			tr.mem.decref()
+			tr.mem = tr.db.mpoolGet(0)
+			tr.mem.incref()
+		}
+		tr.tables = append(tr.tables, t)
+		tr.rec.addTableFile(0, t)
+		tr.stats.write += t.size
+		tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+	}
+	return nil
+}
+
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
+	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
+		if err := tr.flush(); err != nil {
+			return err
+		}
+	}
+	if err := tr.mem.Put(tr.ikScratch, value); err != nil {
+		return err
+	}
+	tr.seq++
+	return nil
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+// Please note that the transaction is not compacted until committed, so if
+// you write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeVal, key, value)
+}
+
+// Delete deletes the value for the given key.
+// Please note that the transaction is not compacted until committed, so if
+// you write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
+	tr.lk.Lock()
+	defer tr.lk.Unlock()
+	if tr.closed {
+		return errTransactionDone
+	}
+	return tr.put(keyTypeDel, key, nil)
+}
+
+// Write applies the given batch to the transaction. 
The batch will be applied +// sequentially. +// Please note that the transaction is not compacted until committed, so if you +// writes 10 same keys, then those 10 same keys are in the transaction. +// +// It is safe to modify the contents of the arguments after Write returns. +func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error { + if b == nil || b.Len() == 0 { + return nil + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + return b.replayInternal(func(i int, kt keyType, k, v []byte) error { + return tr.put(kt, k, v) + }) +} + +func (tr *Transaction) setDone() { + tr.closed = true + tr.db.tr = nil + tr.mem.decref() + <-tr.db.writeLockC +} + +// Commit commits the transaction. If error is not nil, then the transaction is +// not committed, it can then either be retried or discarded. +// +// Other methods should not be called after transaction has been committed. +func (tr *Transaction) Commit() error { + if err := tr.db.ok(); err != nil { + return err + } + + tr.lk.Lock() + defer tr.lk.Unlock() + if tr.closed { + return errTransactionDone + } + if err := tr.flush(); err != nil { + // Return error, lets user decide either to retry or discard + // transaction. + return err + } + if len(tr.tables) != 0 { + // Committing transaction. + tr.rec.setSeqNum(tr.seq) + tr.db.compCommitLk.Lock() + tr.stats.startTimer() + var cerr error + for retry := 0; retry < 3; retry++ { + cerr = tr.db.s.commit(&tr.rec) + if cerr != nil { + tr.db.logf("transaction@commit error R·%d %q", retry, cerr) + select { + case <-time.After(time.Second): + case <-tr.db.closeC: + tr.db.logf("transaction@commit exiting") + tr.db.compCommitLk.Unlock() + return cerr + } + } else { + // Success. Set db.seq. + tr.db.setSeq(tr.seq) + break + } + } + tr.stats.stopTimer() + if cerr != nil { + // Return error, lets user decide either to retry or discard + // transaction. + return cerr + } + + // Update compaction stats. This is safe as long as we hold compCommitLk. + tr.db.compStats.addStat(0, &tr.stats) + + // Trigger table auto-compaction. + tr.db.compTrigger(tr.db.tcompCmdC) + tr.db.compCommitLk.Unlock() + + // Additionally, wait compaction when certain threshold reached. + // Ignore error, returns error only if transaction can't be committed. + tr.db.waitCompaction() + } + // Only mark as done if transaction committed successfully. + tr.setDone() + return nil +} + +func (tr *Transaction) discard() { + // Discard transaction. + for _, t := range tr.tables { + tr.db.logf("transaction@discard @%d", t.fd.Num) + if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil { + tr.db.s.reuseFileNum(t.fd.Num) + } + } +} + +// Discard discards the transaction. +// +// Other methods should not be called after transaction has been discarded. +func (tr *Transaction) Discard() { + tr.lk.Lock() + if !tr.closed { + tr.discard() + tr.setDone() + } + tr.lk.Unlock() +} + +func (db *DB) waitCompaction() error { + if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() { + return db.compTriggerWait(db.tcompCmdC) + } + return nil +} + +// OpenTransaction opens an atomic DB transaction. Only one transaction can be +// opened at a time. Subsequent call to Write and OpenTransaction will be blocked +// until in-flight transaction is committed or discarded. +// The returned transaction handle is safe for concurrent use. +// +// Transaction is expensive and can overwhelm compaction, especially if +// transaction size is small. Use with caution. 
+// +// The transaction must be closed once done, either by committing or discarding +// the transaction. +// Closing the DB will discard open transaction. +func (db *DB) OpenTransaction() (*Transaction, error) { + if err := db.ok(); err != nil { + return nil, err + } + + // The write happen synchronously. + select { + case db.writeLockC <- struct{}{}: + case err := <-db.compPerErrC: + return nil, err + case <-db.closeC: + return nil, ErrClosed + } + + if db.tr != nil { + panic("leveldb: has open transaction") + } + + // Flush current memdb. + if db.mem != nil && db.mem.Len() != 0 { + if _, err := db.rotateMem(0, true); err != nil { + return nil, err + } + } + + // Wait compaction when certain threshold reached. + if err := db.waitCompaction(); err != nil { + return nil, err + } + + tr := &Transaction{ + db: db, + seq: db.seq, + mem: db.mpoolGet(0), + } + tr.mem.incref() + db.tr = tr + return tr, nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go new file mode 100644 index 0000000000..3f0654894b --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go @@ -0,0 +1,102 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Reader is the interface that wraps basic Get and NewIterator methods. +// This interface implemented by both DB and Snapshot. +type Reader interface { + Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) + NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator +} + +// Sizes is list of size. +type Sizes []int64 + +// Sum returns sum of the sizes. +func (sizes Sizes) Sum() int64 { + var sum int64 + for _, size := range sizes { + sum += size + } + return sum +} + +// Logging. +func (db *DB) log(v ...interface{}) { db.s.log(v...) } +func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } + +// Check and clean files. 
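+
+// Editor's aside: code written against the Reader interface above works with
+// a *DB or a *Snapshot interchangeably. Illustrative sketch (hypothetical
+// function, not part of the upstream file):
+//
+//	func countPrefix(r Reader, prefix []byte) (n int, err error) {
+//		it := r.NewIterator(util.BytesPrefix(prefix), nil)
+//		defer it.Release()
+//		for it.Next() {
+//			n++
+//		}
+//		return n, it.Error()
+//	}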
+func (db *DB) checkAndCleanFiles() error { + v := db.s.version() + defer v.release() + + tmap := make(map[int64]bool) + for _, tables := range v.levels { + for _, t := range tables { + tmap[t.fd.Num] = false + } + } + + fds, err := db.s.stor.List(storage.TypeAll) + if err != nil { + return err + } + + var nt int + var rem []storage.FileDesc + for _, fd := range fds { + keep := true + switch fd.Type { + case storage.TypeManifest: + keep = fd.Num >= db.s.manifestFd.Num + case storage.TypeJournal: + if !db.frozenJournalFd.Zero() { + keep = fd.Num >= db.frozenJournalFd.Num + } else { + keep = fd.Num >= db.journalFd.Num + } + case storage.TypeTable: + _, keep = tmap[fd.Num] + if keep { + tmap[fd.Num] = true + nt++ + } + } + + if !keep { + rem = append(rem, fd) + } + } + + if nt != len(tmap) { + var mfds []storage.FileDesc + for num, present := range tmap { + if !present { + mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num}) + db.logf("db@janitor table missing @%d", num) + } + } + return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) + } + + db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) + for _, fd := range rem { + db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) + if err := db.s.stor.Remove(fd); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go new file mode 100644 index 0000000000..db0c1bece1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync/atomic" + "time" + + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error { + wr, err := db.journal.Next() + if err != nil { + return err + } + if err := writeBatchesWithHeader(wr, batches, seq); err != nil { + return err + } + if err := db.journal.Flush(); err != nil { + return err + } + if sync { + return db.journalWriter.Sync() + } + return nil +} + +func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) { + retryLimit := 3 +retry: + // Wait for pending memdb compaction. + err = db.compTriggerWait(db.mcompCmdC) + if err != nil { + return + } + retryLimit-- + + // Create new memdb and journal. + mem, err = db.newMem(n) + if err != nil { + if err == errHasFrozenMem { + if retryLimit <= 0 { + panic("BUG: still has frozen memdb") + } + goto retry + } + return + } + + // Schedule memdb compaction. + if wait { + err = db.compTriggerWait(db.mcompCmdC) + } else { + db.compTrigger(db.mcompCmdC) + } + return +} + +func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) { + delayed := false + slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger() + pauseTrigger := db.s.o.GetWriteL0PauseTrigger() + flush := func() (retry bool) { + mdb = db.getEffectiveMem() + if mdb == nil { + err = ErrClosed + return false + } + defer func() { + if retry { + mdb.decref() + mdb = nil + } + }() + tLen := db.s.tLen(0) + mdbFree = mdb.Free() + switch { + case tLen >= slowdownTrigger && !delayed: + delayed = true + time.Sleep(time.Millisecond) + case mdbFree >= n: + return false + case tLen >= pauseTrigger: + delayed = true + // Set the write paused flag explicitly. 
+ atomic.StoreInt32(&db.inWritePaused, 1) + err = db.compTriggerWait(db.tcompCmdC) + // Unset the write paused flag. + atomic.StoreInt32(&db.inWritePaused, 0) + if err != nil { + return false + } + default: + // Allow memdb to grow if it has no entry. + if mdb.Len() == 0 { + mdbFree = n + } else { + mdb.decref() + mdb, err = db.rotateMem(n, false) + if err == nil { + mdbFree = mdb.Free() + } else { + mdbFree = 0 + } + } + return false + } + return true + } + start := time.Now() + for flush() { + } + if delayed { + db.writeDelay += time.Since(start) + db.writeDelayN++ + } else if db.writeDelayN > 0 { + db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) + atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN)) + atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay)) + db.writeDelay = 0 + db.writeDelayN = 0 + } + return +} + +type writeMerge struct { + sync bool + batch *Batch + keyType keyType + key, value []byte +} + +func (db *DB) unlockWrite(overflow bool, merged int, err error) { + for i := 0; i < merged; i++ { + db.writeAckC <- err + } + if overflow { + // Pass lock to the next write (that failed to merge). + db.writeMergedC <- false + } else { + // Release lock. + <-db.writeLockC + } +} + +// ourBatch is batch that we can modify. +func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { + // Try to flush memdb. This method would also trying to throttle writes + // if it is too fast and compaction cannot catch-up. + mdb, mdbFree, err := db.flush(batch.internalLen) + if err != nil { + db.unlockWrite(false, 0, err) + return err + } + defer mdb.decref() + + var ( + overflow bool + merged int + batches = []*Batch{batch} + ) + + if merge { + // Merge limit. + var mergeLimit int + if batch.internalLen > 128<<10 { + mergeLimit = (1 << 20) - batch.internalLen + } else { + mergeLimit = 128 << 10 + } + mergeCap := mdbFree - batch.internalLen + if mergeLimit > mergeCap { + mergeLimit = mergeCap + } + + merge: + for mergeLimit > 0 { + select { + case incoming := <-db.writeMergeC: + if incoming.batch != nil { + // Merge batch. + if incoming.batch.internalLen > mergeLimit { + overflow = true + break merge + } + batches = append(batches, incoming.batch) + mergeLimit -= incoming.batch.internalLen + } else { + // Merge put. + internalLen := len(incoming.key) + len(incoming.value) + 8 + if internalLen > mergeLimit { + overflow = true + break merge + } + if ourBatch == nil { + ourBatch = db.batchPool.Get().(*Batch) + ourBatch.Reset() + batches = append(batches, ourBatch) + } + // We can use same batch since concurrent write doesn't + // guarantee write order. + ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value) + mergeLimit -= internalLen + } + sync = sync || incoming.sync + merged++ + db.writeMergedC <- true + + default: + break merge + } + } + } + + // Release ourBatch if any. + if ourBatch != nil { + defer db.batchPool.Put(ourBatch) + } + + // Seq number. + seq := db.seq + 1 + + // Write journal. + if err := db.writeJournal(batches, seq, sync); err != nil { + db.unlockWrite(overflow, merged, err) + return err + } + + // Put batches. + for _, batch := range batches { + if err := batch.putMem(seq, mdb.DB); err != nil { + panic(err) + } + seq += uint64(batch.Len()) + } + + // Incr seq number. + db.addSeq(uint64(batchesLen(batches))) + + // Rotate memdb if it's reach the threshold. + if batch.internalLen >= mdbFree { + db.rotateMem(0, false) + } + + db.unlockWrite(overflow, merged, nil) + return nil +} + +// Write apply the given batch to the DB. 
The batch records will be applied
+// sequentially. Write may be used concurrently; when it is used concurrently
+// and the batch is small enough, Write will try to merge the batches. Set the
+// NoWriteMerge option to true to disable write merging.
+//
+// It is safe to modify the contents of the arguments after Write returns but
+// not before. Write will not modify the content of the batch.
+func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
+	if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
+		return err
+	}
+
+	// If the batch size is larger than the write buffer, it may be justified
+	// to write it using a transaction instead. With a transaction the batch
+	// will be written into tables directly, skipping the journaling.
+	if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
+		tr, err := db.OpenTransaction()
+		if err != nil {
+			return err
+		}
+		if err := tr.Write(batch, wo); err != nil {
+			tr.Discard()
+			return err
+		}
+		return tr.Commit()
+	}
+
+	merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+	sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+	// Acquire write lock.
+	if merge {
+		select {
+		case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
+			if <-db.writeMergedC {
+				// Write is merged.
+				return <-db.writeAckC
+			}
+			// Write is not merged, the write lock is handed to us. Continue.
+		case db.writeLockC <- struct{}{}:
+			// Write lock acquired.
+		case err := <-db.compPerErrC:
+			// Compaction error.
+			return err
+		case <-db.closeC:
+			// Closed
+			return ErrClosed
+		}
+	} else {
+		select {
+		case db.writeLockC <- struct{}{}:
+			// Write lock acquired.
+		case err := <-db.compPerErrC:
+			// Compaction error.
+			return err
+		case <-db.closeC:
+			// Closed
+			return ErrClosed
+		}
+	}
+
+	return db.writeLocked(batch, nil, merge, sync)
+}
+
+func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
+	if err := db.ok(); err != nil {
+		return err
+	}
+
+	merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+	sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+	// Acquire write lock.
+	if merge {
+		select {
+		case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
+			if <-db.writeMergedC {
+				// Write is merged.
+				return <-db.writeAckC
+			}
+			// Write is not merged, the write lock is handed to us. Continue.
+		case db.writeLockC <- struct{}{}:
+			// Write lock acquired.
+		case err := <-db.compPerErrC:
+			// Compaction error.
+			return err
+		case <-db.closeC:
+			// Closed
+			return ErrClosed
+		}
+	} else {
+		select {
+		case db.writeLockC <- struct{}{}:
+			// Write lock acquired.
+		case err := <-db.compPerErrC:
+			// Compaction error.
+			return err
+		case <-db.closeC:
+			// Closed
+			return ErrClosed
+		}
+	}
+
+	batch := db.batchPool.Get().(*Batch)
+	batch.Reset()
+	batch.appendRec(kt, key, value)
+	return db.writeLocked(batch, batch, merge, sync)
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map. Write merge also applies for Put, see
+// Write.
+//
+// It is safe to modify the contents of the arguments after Put returns but not
+// before.
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+	return db.putRec(keyTypeVal, key, value, wo)
+}
+
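+// Editor's aside: writeLocked above lets the write-lock holder absorb queued
+// writers by draining writeMergeC while it holds the lock, folding small
+// incoming batches into its own journal write and acking each merged writer
+// afterwards. The merge budget, reduced to a sketch (illustrative names, not
+// part of the upstream file):
+//
+//	func mergeBudget(leaderLen, mdbFree int) int {
+//		limit := 128 << 10 // merge at most ~128 KiB of followers...
+//		if leaderLen > 128<<10 {
+//			limit = (1 << 20) - leaderLen // ...unless the leader is already large
+//		}
+//		if room := mdbFree - leaderLen; limit > room {
+//			limit = room // never merge more than fits in the memdb
+//		}
+//		return limit
+//	}
+
+// Delete deletes the value for the given key. Delete will not return an error
+// if the key doesn't exist. Write merge also applies for Delete, see Write.
+//
+// It is safe to modify the contents of the arguments after Delete returns but
+// not before.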
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { + return db.putRec(keyTypeDel, key, nil, wo) +} + +func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { + iter := mem.NewIterator(nil) + defer iter.Release() + return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && + (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) +} + +// CompactRange compacts the underlying DB for the given key range. +// In particular, deleted and overwritten versions are discarded, +// and the data is rearranged to reduce the cost of operations +// needed to access the data. This operation should typically only +// be invoked by users who understand the underlying implementation. +// +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +// Therefore if both is nil then it will compact entire DB. +func (db *DB) CompactRange(r util.Range) error { + if err := db.ok(); err != nil { + return err + } + + // Lock writer. + select { + case db.writeLockC <- struct{}{}: + case err := <-db.compPerErrC: + return err + case <-db.closeC: + return ErrClosed + } + + // Check for overlaps in memdb. + mdb := db.getEffectiveMem() + if mdb == nil { + return ErrClosed + } + defer mdb.decref() + if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) { + // Memdb compaction. + if _, err := db.rotateMem(0, false); err != nil { + <-db.writeLockC + return err + } + <-db.writeLockC + if err := db.compTriggerWait(db.mcompCmdC); err != nil { + return err + } + } else { + <-db.writeLockC + } + + // Table compaction. + return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit) +} + +// SetReadOnly makes DB read-only. It will stay read-only until reopened. +func (db *DB) SetReadOnly() error { + if err := db.ok(); err != nil { + return err + } + + // Lock writer. + select { + case db.writeLockC <- struct{}{}: + db.compWriteLocking = true + case err := <-db.compPerErrC: + return err + case <-db.closeC: + return ErrClosed + } + + // Set compaction read-only. + select { + case db.compErrSetC <- ErrReadOnly: + case perr := <-db.compPerErrC: + return perr + case <-db.closeC: + return ErrClosed + } + + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go new file mode 100644 index 0000000000..be768e5739 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go @@ -0,0 +1,92 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package leveldb provides implementation of LevelDB key/value database. +// +// Create or open a database: +// +// // The returned DB instance is safe for concurrent use. Which mean that all +// // DB's methods may be called concurrently from multiple goroutine. +// db, err := leveldb.OpenFile("path/to/db", nil) +// ... +// defer db.Close() +// ... +// +// Read or modify the database content: +// +// // Remember that the contents of the returned slice should not be modified. +// data, err := db.Get([]byte("key"), nil) +// ... +// err = db.Put([]byte("key"), []byte("value"), nil) +// ... +// err = db.Delete([]byte("key"), nil) +// ... 
+// +// Iterate over database content: +// +// iter := db.NewIterator(nil, nil) +// for iter.Next() { +// // Remember that the contents of the returned slice should not be modified, and +// // only valid until the next call to Next. +// key := iter.Key() +// value := iter.Value() +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content with a particular prefix: +// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Seek-then-Iterate: +// +// iter := db.NewIterator(nil, nil) +// for ok := iter.Seek(key); ok; ok = iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Iterate over subset of database content: +// +// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) +// for iter.Next() { +// // Use key/value. +// ... +// } +// iter.Release() +// err = iter.Error() +// ... +// +// Batch writes: +// +// batch := new(leveldb.Batch) +// batch.Put([]byte("foo"), []byte("value")) +// batch.Put([]byte("bar"), []byte("another value")) +// batch.Delete([]byte("baz")) +// err = db.Write(batch, nil) +// ... +// +// Use bloom filter: +// +// o := &opt.Options{ +// Filter: filter.NewBloomFilter(10), +// } +// db, err := leveldb.OpenFile("path/to/db", o) +// ... +// defer db.Close() +// ... +package leveldb diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go new file mode 100644 index 0000000000..de2649812c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go @@ -0,0 +1,20 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" +) + +// Common errors. +var ( + ErrNotFound = errors.ErrNotFound + ErrReadOnly = errors.New("leveldb: read-only mode") + ErrSnapshotReleased = errors.New("leveldb: snapshot released") + ErrIterReleased = errors.New("leveldb: iterator released") + ErrClosed = errors.New("leveldb: closed") +) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go new file mode 100644 index 0000000000..8d6146b6f5 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go @@ -0,0 +1,78 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package errors provides common error types used throughout leveldb. +package errors + +import ( + "errors" + "fmt" + + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Common errors. +var ( + ErrNotFound = New("leveldb: not found") + ErrReleased = util.ErrReleased + ErrHasReleaser = util.ErrHasReleaser +) + +// New returns an error that formats as the given text. +func New(text string) error { + return errors.New(text) +} + +// ErrCorrupted is the type that wraps errors that indicate corruption in +// the database. 
+type ErrCorrupted struct {
+	Fd  storage.FileDesc
+	Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+	if !e.Fd.Zero() {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+	}
+	return e.Err.Error()
+}
+
+// NewErrCorrupted creates a new ErrCorrupted error.
+func NewErrCorrupted(fd storage.FileDesc, err error) error {
+	return &ErrCorrupted{fd, err}
+}
+
+// IsCorrupted reports whether the error indicates a corruption.
+func IsCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	case *storage.ErrCorrupted:
+		return true
+	}
+	return false
+}
+
+// ErrMissingFiles indicates a corruption due to missing files.
+// It is always wrapped in ErrCorrupted.
+type ErrMissingFiles struct {
+	Fds []storage.FileDesc
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFd sets the 'file info' of the given error to the given file.
+// Currently only ErrCorrupted is supported; for any other type it does
+// nothing.
+func SetFd(err error, fd storage.FileDesc) error {
+	switch x := err.(type) {
+	case *ErrCorrupted:
+		x.Fd = fd
+		return x
+	}
+	return err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 0000000000..e961e420d3
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+type iFilter struct {
+	filter.Filter
+}
+
+func (f iFilter) Contains(filter, key []byte) bool {
+	return f.Filter.Contains(filter, internalKey(key).ukey())
+}
+
+func (f iFilter) NewGenerator() filter.FilterGenerator {
+	return iFilterGenerator{f.Filter.NewGenerator()}
+}
+
+type iFilterGenerator struct {
+	filter.FilterGenerator
+}
+
+func (g iFilterGenerator) Add(key []byte) {
+	g.FilterGenerator.Add(internalKey(key).ukey())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 0000000000..bab0e99705
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func bloomHash(key []byte) uint32 {
+	return util.Hash(key, 0xbc9f1d34)
+}
+
+type bloomFilter int
+
+// The bloom filter serializes its parameters and is backward compatible
+// with respect to them. Therefore, its parameters are not added to its
+// name.
+func (bloomFilter) Name() string {
+	return "leveldb.BuiltinBloomFilter"
+}
+
+func (f bloomFilter) Contains(filter, key []byte) bool {
+	nBytes := len(filter) - 1
+	if nBytes < 1 {
+		return false
+	}
+	nBits := uint32(nBytes * 8)
+
+	// Use the encoded k so that we can read filters generated by
+	// bloom filters created using different parameters.
+	k := filter[nBytes]
+	if k > 30 {
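+		// Editor's aside: the probing loop after this guard uses the
+		// standard double-hashing trick: instead of k independent hash
+		// functions, probe j is h + j*delta (mod nBits), which Kirsch &
+		// Mitzenmacher showed preserves the false-positive bound. Sketch of
+		// the probe sequence (illustrative, not part of the upstream file):
+		//
+		//	func probes(h, delta, nBits uint32, k uint8) []uint32 {
+		//		out := make([]uint32, 0, int(k))
+		//		for j := uint8(0); j < k; j++ {
+		//			out = append(out, h%nBits)
+		//			h += delta
+		//		}
+		//		return out
+		//	}
+
+		// Reserved for potentially new encodings for short bloom filters.
+		// Consider it a match.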
+		return true
+	}
+
+	kh := bloomHash(key)
+	delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+	for j := uint8(0); j < k; j++ {
+		bitpos := kh % nBits
+		if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
+			return false
+		}
+		kh += delta
+	}
+	return true
+}
+
+func (f bloomFilter) NewGenerator() FilterGenerator {
+	// Round down to reduce probing cost a little bit.
+	k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
+	if k < 1 {
+		k = 1
+	} else if k > 30 {
+		k = 30
+	}
+	return &bloomFilterGenerator{
+		n: int(f),
+		k: k,
+	}
+}
+
+type bloomFilterGenerator struct {
+	n int
+	k uint8
+
+	keyHashes []uint32
+}
+
+func (g *bloomFilterGenerator) Add(key []byte) {
+	// Use double-hashing to generate a sequence of hash values.
+	// See analysis in [Kirsch,Mitzenmacher 2006].
+	g.keyHashes = append(g.keyHashes, bloomHash(key))
+}
+
+func (g *bloomFilterGenerator) Generate(b Buffer) {
+	// Compute bloom filter size (in both bits and bytes)
+	nBits := uint32(len(g.keyHashes) * g.n)
+	// For small n, we can see a very high false positive rate. Fix it
+	// by enforcing a minimum bloom filter length.
+	if nBits < 64 {
+		nBits = 64
+	}
+	nBytes := (nBits + 7) / 8
+	nBits = nBytes * 8
+
+	dest := b.Alloc(int(nBytes) + 1)
+	dest[nBytes] = g.k
+	for _, kh := range g.keyHashes {
+		delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+		for j := uint8(0); j < g.k; j++ {
+			bitpos := kh % nBits
+			dest[bitpos/8] |= (1 << (bitpos % 8))
+			kh += delta
+		}
+	}
+
+	g.keyHashes = g.keyHashes[:0]
+}
+
+// NewBloomFilter creates a new initialized bloom filter for given
+// bitsPerKey.
+//
+// Since bitsPerKey is persisted individually for each bloom filter
+// serialization, bloom filters are backwards compatible with respect to
+// changing bitsPerKey. This means that no big performance penalty will
+// be experienced when changing the parameter. See documentation for
+// opt.Options.Filter for more information.
+func NewBloomFilter(bitsPerKey int) Filter {
+	return bloomFilter(bitsPerKey)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 0000000000..7a925c5a86
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides the interface and an implementation of a
+// probabilistic data structure.
+//
+// The filter is responsible for creating a small filter from a set of keys.
+// This filter is then used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+	// Alloc allocates an n-byte slice from the buffer. This also advances
+	// the write offset.
+	Alloc(n int) []byte
+
+	// Write appends the contents of p to the buffer.
+	Write(p []byte) (n int, err error)
+
+	// WriteByte appends the byte c to the buffer.
+	WriteByte(c byte) error
+}
+
+// Filter is the filter.
+type Filter interface {
+	// Name returns the name of this policy.
+	//
+	// Note that if the filter encoding changes in an incompatible way,
+	// the name returned by this method must be changed. Otherwise, old
+	// incompatible filters may be passed to methods of this type.
+	Name() string
+
+	// NewGenerator creates a new filter generator.
+	NewGenerator() FilterGenerator
+
+	// Contains returns true if the filter contains the given key.
+	//
+	// The filter argument is a filter generated by the filter generator.
+	Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+	// Add adds a key to the filter generator.
+	//
+	// The key may become invalid after the call to this method ends,
+	// therefore the key must be copied if the implementation requires
+	// keeping it for later use. The key should not be modified directly;
+	// doing so may cause undefined results.
+	Add(key []byte)
+
+	// Generate generates filters based on keys passed so far. After a call
+	// to Generate the filter generator may be reset, depending on the
+	// implementation.
+	Generate(b Buffer)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 0000000000..a23ab05f70
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps basic Len and Search method.
+type BasicArray interface {
+	// Len returns the length of the array.
+	Len() int
+
+	// Search finds the smallest index that points to a key greater
+	// than or equal to the given key.
+	Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and basic Index method.
+type Array interface {
+	BasicArray
+
+	// Index returns the key/value pair at index i.
+	Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and basic Get method.
+type ArrayIndexer interface {
+	BasicArray
+
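+	// Editor's aside: a minimal Array implementation is a sorted slice plus
+	// a binary search; NewArrayIterator below turns it into a full iterator.
+	// Illustrative sketch, not part of the upstream file:
+	//
+	//	type stringArray []string // must be kept sorted
+	//
+	//	func (a stringArray) Len() int { return len(a) }
+	//	func (a stringArray) Search(key []byte) int {
+	//		return sort.SearchStrings([]string(a), string(key))
+	//	}
+	//	func (a stringArray) Index(i int) (key, value []byte) {
+	//		return []byte(a[i]), []byte(a[i])
+	//	}
+	//
+	// NewArrayIterator(stringArray{"a", "b", "c"}) then yields those elements
+	// in order.
+
+	// Get returns a new data iterator with index of i.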
+ Get(i int) Iterator +} + +type basicArrayIterator struct { + util.BasicReleaser + array BasicArray + pos int + err error +} + +func (i *basicArrayIterator) Valid() bool { + return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() +} + +func (i *basicArrayIterator) First() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.array.Len() == 0 { + i.pos = -1 + return false + } + i.pos = 0 + return true +} + +func (i *basicArrayIterator) Last() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = n - 1 + return true +} + +func (i *basicArrayIterator) Seek(key []byte) bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + n := i.array.Len() + if n == 0 { + i.pos = 0 + return false + } + i.pos = i.array.Search(key) + if i.pos >= n { + return false + } + return true +} + +func (i *basicArrayIterator) Next() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.pos++ + if n := i.array.Len(); i.pos >= n { + i.pos = n + return false + } + return true +} + +func (i *basicArrayIterator) Prev() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.pos-- + if i.pos < 0 { + i.pos = -1 + return false + } + return true +} + +func (i *basicArrayIterator) Error() error { return i.err } + +type arrayIterator struct { + basicArrayIterator + array Array + pos int + key, value []byte +} + +func (i *arrayIterator) updateKV() { + if i.pos == i.basicArrayIterator.pos { + return + } + i.pos = i.basicArrayIterator.pos + if i.Valid() { + i.key, i.value = i.array.Index(i.pos) + } else { + i.key = nil + i.value = nil + } +} + +func (i *arrayIterator) Key() []byte { + i.updateKV() + return i.key +} + +func (i *arrayIterator) Value() []byte { + i.updateKV() + return i.value +} + +type arrayIteratorIndexer struct { + basicArrayIterator + array ArrayIndexer +} + +func (i *arrayIteratorIndexer) Get() Iterator { + if i.Valid() { + return i.array.Get(i.basicArrayIterator.pos) + } + return nil +} + +// NewArrayIterator returns an iterator from the given array. +func NewArrayIterator(array Array) Iterator { + return &arrayIterator{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + pos: -1, + } +} + +// NewArrayIndexer returns an index iterator from the given array. +func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { + return &arrayIteratorIndexer{ + basicArrayIterator: basicArrayIterator{array: array, pos: -1}, + array: array, + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go new file mode 100644 index 0000000000..939adbb933 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go @@ -0,0 +1,242 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// IteratorIndexer is the interface that wraps CommonIterator and basic Get +// method. IteratorIndexer provides index for indexed iterator. +type IteratorIndexer interface { + CommonIterator + + // Get returns a new data iterator for the current position, or nil if + // done. 
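+	//
+	// Get is invoked by the indexed iterator each time the index moves to
+	// a new position; the previously returned data iterator is released
+	// before the new one is requested.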
+ Get() Iterator +} + +type indexedIterator struct { + util.BasicReleaser + index IteratorIndexer + strict bool + + data Iterator + err error + errf func(err error) + closed bool +} + +func (i *indexedIterator) setData() { + if i.data != nil { + i.data.Release() + } + i.data = i.index.Get() +} + +func (i *indexedIterator) clearData() { + if i.data != nil { + i.data.Release() + } + i.data = nil +} + +func (i *indexedIterator) indexErr() { + if err := i.index.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + i.err = err + } +} + +func (i *indexedIterator) dataErr() bool { + if err := i.data.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + if i.strict || !errors.IsCorrupted(err) { + i.err = err + return true + } + } + return false +} + +func (i *indexedIterator) Valid() bool { + return i.data != nil && i.data.Valid() +} + +func (i *indexedIterator) First() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.First() { + i.indexErr() + i.clearData() + return false + } + i.setData() + return i.Next() +} + +func (i *indexedIterator) Last() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.Last() { + i.indexErr() + i.clearData() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + return true +} + +func (i *indexedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + if !i.index.Seek(key) { + i.indexErr() + i.clearData() + return false + } + i.setData() + if !i.data.Seek(key) { + if i.dataErr() { + return false + } + i.clearData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Next() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + switch { + case i.data != nil && !i.data.Next(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Next() { + i.indexErr() + return false + } + i.setData() + return i.Next() + } + return true +} + +func (i *indexedIterator) Prev() bool { + if i.err != nil { + return false + } else if i.Released() { + i.err = ErrIterReleased + return false + } + + switch { + case i.data != nil && !i.data.Prev(): + if i.dataErr() { + return false + } + i.clearData() + fallthrough + case i.data == nil: + if !i.index.Prev() { + i.indexErr() + return false + } + i.setData() + if !i.data.Last() { + if i.dataErr() { + return false + } + i.clearData() + return i.Prev() + } + } + return true +} + +func (i *indexedIterator) Key() []byte { + if i.data == nil { + return nil + } + return i.data.Key() +} + +func (i *indexedIterator) Value() []byte { + if i.data == nil { + return nil + } + return i.data.Value() +} + +func (i *indexedIterator) Release() { + i.clearData() + i.index.Release() + i.BasicReleaser.Release() +} + +func (i *indexedIterator) Error() error { + if i.err != nil { + return i.err + } + if err := i.index.Error(); err != nil { + return err + } + return nil +} + +func (i *indexedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewIndexedIterator returns an 'indexed iterator'. An index is iterator +// that returns another iterator, a 'data iterator'. A 'data iterator' is the +// iterator that contains actual key/value pairs. 
+// +// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) +// won't be ignored and will halt 'indexed iterator', otherwise the iterator will +// continue to the next 'data iterator'. Corruption on 'index iterator' will not be +// ignored and will halt the iterator. +func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { + return &indexedIterator{index: index, strict: strict} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go new file mode 100644 index 0000000000..96fb0f6859 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go @@ -0,0 +1,132 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package iterator provides interface and implementation to traverse over +// contents of a database. +package iterator + +import ( + "errors" + + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + ErrIterReleased = errors.New("leveldb/iterator: iterator released") +) + +// IteratorSeeker is the interface that wraps the 'seeks method'. +type IteratorSeeker interface { + // First moves the iterator to the first key/value pair. If the iterator + // only contains one key/value pair then First and Last would moves + // to the same key/value pair. + // It returns whether such pair exist. + First() bool + + // Last moves the iterator to the last key/value pair. If the iterator + // only contains one key/value pair then First and Last would moves + // to the same key/value pair. + // It returns whether such pair exist. + Last() bool + + // Seek moves the iterator to the first key/value pair whose key is greater + // than or equal to the given key. + // It returns whether such pair exist. + // + // It is safe to modify the contents of the argument after Seek returns. + Seek(key []byte) bool + + // Next moves the iterator to the next key/value pair. + // It returns false if the iterator is exhausted. + Next() bool + + // Prev moves the iterator to the previous key/value pair. + // It returns false if the iterator is exhausted. + Prev() bool +} + +// CommonIterator is the interface that wraps common iterator methods. +type CommonIterator interface { + IteratorSeeker + + // util.Releaser is the interface that wraps basic Release method. + // When called Release will releases any resources associated with the + // iterator. + util.Releaser + + // util.ReleaseSetter is the interface that wraps the basic SetReleaser + // method. + util.ReleaseSetter + + // TODO: Remove this when ready. + Valid() bool + + // Error returns any accumulated error. Exhausting all the key/value pairs + // is not considered to be an error. + Error() error +} + +// Iterator iterates over a DB's key/value pairs in key order. +// +// When encounter an error any 'seeks method' will return false and will +// yield no key/value pairs. The error can be queried by calling the Error +// method. Calling Release is still necessary. +// +// An iterator must be released after use, but it is not necessary to read +// an iterator until exhaustion. +// Also, an iterator is not necessarily safe for concurrent use, but it is +// safe to use multiple iterators concurrently, with each in a dedicated +// goroutine. +type Iterator interface { + CommonIterator + + // Key returns the key of the current key/value pair, or nil if done. 
+ // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to any 'seeks method'. + Key() []byte + + // Value returns the value of the current key/value pair, or nil if done. + // The caller should not modify the contents of the returned slice, and + // its contents may change on the next call to any 'seeks method'. + Value() []byte +} + +// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback +// method. +// +// ErrorCallbackSetter implemented by indexed and merged iterator. +type ErrorCallbackSetter interface { + // SetErrorCallback allows set an error callback of the corresponding + // iterator. Use nil to clear the callback. + SetErrorCallback(f func(err error)) +} + +type emptyIterator struct { + util.BasicReleaser + err error +} + +func (i *emptyIterator) rErr() { + if i.err == nil && i.Released() { + i.err = ErrIterReleased + } +} + +func (*emptyIterator) Valid() bool { return false } +func (i *emptyIterator) First() bool { i.rErr(); return false } +func (i *emptyIterator) Last() bool { i.rErr(); return false } +func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } +func (i *emptyIterator) Next() bool { i.rErr(); return false } +func (i *emptyIterator) Prev() bool { i.rErr(); return false } +func (*emptyIterator) Key() []byte { return nil } +func (*emptyIterator) Value() []byte { return nil } +func (i *emptyIterator) Error() error { return i.err } + +// NewEmptyIterator creates an empty iterator. The err parameter can be +// nil, but if not nil the given err will be returned by Error method. +func NewEmptyIterator(err error) Iterator { + return &emptyIterator{err: err} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go new file mode 100644 index 0000000000..1a7e29df8f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
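+
+// A minimal traversal sketch for the Iterator interface defined in iter.go
+// (illustrative only; it and process are placeholders, not part of this
+// package):
+//
+//	for ok := it.First(); ok; ok = it.Next() {
+//		process(it.Key(), it.Value())
+//	}
+//	it.Release()
+//	if err := it.Error(); err != nil {
+//		// handle the accumulated error
+//	}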
+ +package iterator + +import ( + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type mergedIterator struct { + cmp comparer.Comparer + iters []Iterator + strict bool + + keys [][]byte + index int + dir dir + err error + errf func(err error) + releaser util.Releaser +} + +func assertKey(key []byte) []byte { + if key == nil { + panic("leveldb/iterator: nil key") + } + return key +} + +func (i *mergedIterator) iterErr(iter Iterator) bool { + if err := iter.Error(); err != nil { + if i.errf != nil { + i.errf(err) + } + if i.strict || !errors.IsCorrupted(err) { + i.err = err + return true + } + } + return false +} + +func (i *mergedIterator) Valid() bool { + return i.err == nil && i.dir > dirEOI +} + +func (i *mergedIterator) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.First(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirEOI + return i.prev() +} + +func (i *mergedIterator) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + for x, iter := range i.iters { + switch { + case iter.Seek(key): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + i.dir = dirSOI + return i.next() +} + +func (i *mergedIterator) next() bool { + var key []byte + if i.dir == dirForward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirEOI + return false + } + i.dir = dirForward + return true +} + +func (i *mergedIterator) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirSOI: + return i.First() + case dirBackward: + key := append([]byte{}, i.keys[i.index]...) 
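+		// Direction change (backward to forward): re-seek every input
+		// iterator to the saved current key, then step past it.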
+ if !i.Seek(key) { + return false + } + return i.Next() + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Next(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.next() +} + +func (i *mergedIterator) prev() bool { + var key []byte + if i.dir == dirBackward { + key = i.keys[i.index] + } + for x, tkey := range i.keys { + if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { + key = tkey + i.index = x + } + } + if key == nil { + i.dir = dirSOI + return false + } + i.dir = dirBackward + return true +} + +func (i *mergedIterator) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + switch i.dir { + case dirEOI: + return i.Last() + case dirForward: + key := append([]byte{}, i.keys[i.index]...) + for x, iter := range i.iters { + if x == i.index { + continue + } + seek := iter.Seek(key) + switch { + case seek && iter.Prev(), !seek && iter.Last(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + } + } + + x := i.index + iter := i.iters[x] + switch { + case iter.Prev(): + i.keys[x] = assertKey(iter.Key()) + case i.iterErr(iter): + return false + default: + i.keys[x] = nil + } + return i.prev() +} + +func (i *mergedIterator) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.keys[i.index] +} + +func (i *mergedIterator) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.iters[i.index].Value() +} + +func (i *mergedIterator) Release() { + if i.dir != dirReleased { + i.dir = dirReleased + for _, iter := range i.iters { + iter.Release() + } + i.iters = nil + i.keys = nil + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *mergedIterator) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *mergedIterator) Error() error { + return i.err +} + +func (i *mergedIterator) SetErrorCallback(f func(err error)) { + i.errf = f +} + +// NewMergedIterator returns an iterator that merges its input. Walking the +// resultant iterator will return all key/value pairs of all input iterators +// in strictly increasing key order, as defined by cmp. +// The input's key ranges may overlap, but there are assumed to be no duplicate +// keys: if iters[i] contains a key k then iters[j] will not contain that key k. +// None of the iters may be nil. +// +// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) +// won't be ignored and will halt 'merged iterator', otherwise the iterator will +// continue to the next 'input iterator'. +func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { + return &mergedIterator{ + iters: iters, + cmp: cmp, + strict: strict, + keys: make([][]byte, len(iters)), + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go new file mode 100644 index 0000000000..d094c3d0f8 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go @@ -0,0 +1,524 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, authors and contributors information can be found at the URLs
+// below, respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers nor Writers are safe for concurrent use.
+//
+// Example code:
+//	func read(r io.Reader) ([]string, error) {
+//		var ss []string
+//		journals := journal.NewReader(r, nil, true, true)
+//		for {
+//			j, err := journals.Next()
+//			if err == io.EOF {
+//				break
+//			}
+//			if err != nil {
+//				return nil, err
+//			}
+//			s, err := ioutil.ReadAll(j)
+//			if err != nil {
+//				return nil, err
+//			}
+//			ss = append(ss, string(s))
+//		}
+//		return ss, nil
+//	}
+//
+//	func write(w io.Writer, ss []string) error {
+//		journals := journal.NewWriter(w)
+//		for _, s := range ss {
+//			j, err := journals.Next()
+//			if err != nil {
+//				return err
+//			}
+//			if _, err := j.Write([]byte(s)); err != nil {
+//				return err
+//			}
+//		}
+//		return journals.Close()
+//	}
+//
+// The wire format is that the stream is divided into 32 KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7-byte header (a
+// 4-byte checksum, a 2-byte little-endian uint16 length, and a 1-byte chunk
+// type) followed by a payload. The checksum is over the chunk type and the
+// payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+	fullChunkType   = 1
+	firstChunkType  = 2
+	middleChunkType = 3
+	lastChunkType   = 4
+)
+
+const (
+	blockSize  = 32 * 1024
+	headerSize = 7
+)
+
+type flusher interface {
+	Flush() error
+}
+
+// ErrCorrupted is the error type generated by a corrupted block or chunk.
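+// It records the number of bytes dropped and the reason for the drop.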
+type ErrCorrupted struct { + Size int + Reason string +} + +func (e *ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) +} + +// Dropper is the interface that wrap simple Drop method. The Drop +// method will be called when the journal reader dropping a block or chunk. +type Dropper interface { + Drop(err error) +} + +// Reader reads journals from an underlying io.Reader. +type Reader struct { + // r is the underlying reader. + r io.Reader + // the dropper. + dropper Dropper + // strict flag. + strict bool + // checksum flag. + checksum bool + // seq is the sequence number of the current journal. + seq int + // buf[i:j] is the unread portion of the current chunk's payload. + // The low bound, i, excludes the chunk header. + i, j int + // n is the number of bytes of buf that are valid. Once reading has started, + // only the final block can have n < blockSize. + n int + // last is whether the current chunk is the last chunk of the journal. + last bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewReader returns a new reader. The dropper may be nil, and if +// strict is true then corrupted or invalid chunk will halt the journal +// reader entirely. +func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { + return &Reader{ + r: r, + dropper: dropper, + strict: strict, + checksum: checksum, + last: true, + } +} + +var errSkip = errors.New("leveldb/journal: skipped") + +func (r *Reader) corrupt(n int, reason string, skip bool) error { + if r.dropper != nil { + r.dropper.Drop(&ErrCorrupted{n, reason}) + } + if r.strict && !skip { + r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason}) + return r.err + } + return errSkip +} + +// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the +// next block into the buffer if necessary. +func (r *Reader) nextChunk(first bool) error { + for { + if r.j+headerSize <= r.n { + checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) + length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) + chunkType := r.buf[r.j+6] + unprocBlock := r.n - r.j + if checksum == 0 && length == 0 && chunkType == 0 { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "zero header", false) + } + if chunkType < fullChunkType || chunkType > lastChunkType { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false) + } + r.i = r.j + headerSize + r.j = r.j + headerSize + int(length) + if r.j > r.n { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "chunk length overflows block", false) + } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { + // Drop entire block. + r.i = r.n + r.j = r.n + return r.corrupt(unprocBlock, "checksum mismatch", false) + } + if first && chunkType != fullChunkType && chunkType != firstChunkType { + chunkLength := (r.j - r.i) + headerSize + r.i = r.j + // Report the error, but skip it. + return r.corrupt(chunkLength, "orphan chunk", true) + } + r.last = chunkType == fullChunkType || chunkType == lastChunkType + return nil + } + + // The last block. + if r.n < blockSize && r.n > 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + + // Read block. 
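+		// ReadFull returns io.ErrUnexpectedEOF when the final block is
+		// shorter than blockSize; the short read is tolerated here, and
+		// the r.n < blockSize check above detects the end of that block
+		// on a later iteration.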
+ n, err := io.ReadFull(r.r, r.buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return err + } + if n == 0 { + if !first { + return r.corrupt(0, "missing chunk part", false) + } + r.err = io.EOF + return r.err + } + r.i, r.j, r.n = 0, 0, n + } +} + +// Next returns a reader for the next journal. It returns io.EOF if there are no +// more journals. The reader returned becomes stale after the next Next call, +// and should no longer be used. If strict is false, the reader will returns +// io.ErrUnexpectedEOF error when found corrupted journal. +func (r *Reader) Next() (io.Reader, error) { + r.seq++ + if r.err != nil { + return nil, r.err + } + r.i = r.j + for { + if err := r.nextChunk(true); err == nil { + break + } else if err != errSkip { + return nil, err + } + } + return &singleReader{r, r.seq, nil}, nil +} + +// Reset resets the journal reader, allows reuse of the journal reader. Reset returns +// last accumulated error. +func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { + r.seq++ + err := r.err + r.r = reader + r.dropper = dropper + r.strict = strict + r.checksum = checksum + r.i = 0 + r.j = 0 + r.n = 0 + r.last = true + r.err = nil + return err +} + +type singleReader struct { + r *Reader + seq int + err error +} + +func (x *singleReader) Read(p []byte) (int, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + n := copy(p, r.buf[r.i:r.j]) + r.i += n + return n, nil +} + +func (x *singleReader) ReadByte() (byte, error) { + r := x.r + if r.seq != x.seq { + return 0, errors.New("leveldb/journal: stale reader") + } + if x.err != nil { + return 0, x.err + } + if r.err != nil { + return 0, r.err + } + for r.i == r.j { + if r.last { + return 0, io.EOF + } + x.err = r.nextChunk(false) + if x.err != nil { + if x.err == errSkip { + x.err = io.ErrUnexpectedEOF + } + return 0, x.err + } + } + c := r.buf[r.i] + r.i++ + return c, nil +} + +// Writer writes journals to an underlying io.Writer. +type Writer struct { + // w is the underlying writer. + w io.Writer + // seq is the sequence number of the current journal. + seq int + // f is w as a flusher. + f flusher + // buf[i:j] is the bytes that will become the current chunk. + // The low bound, i, includes the chunk header. + i, j int + // buf[:written] has already been written to w. + // written is zero unless Flush has been called. + written int + // first is whether the current chunk is the first chunk of the journal. + first bool + // pending is whether a chunk is buffered but not yet written. + pending bool + // err is any accumulated error. + err error + // buf is the buffer. + buf [blockSize]byte +} + +// NewWriter returns a new Writer. +func NewWriter(w io.Writer) *Writer { + f, _ := w.(flusher) + return &Writer{ + w: w, + f: f, + } +} + +// fillHeader fills in the header for the pending chunk. 
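+// The header occupies buf[w.i : w.i+headerSize]: bytes 0-3 hold a checksum
+// over the chunk type and payload, bytes 4-5 the little-endian payload
+// length, and byte 6 the chunk type.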
+func (w *Writer) fillHeader(last bool) { + if w.i+headerSize > w.j || w.j > blockSize { + panic("leveldb/journal: bad writer state") + } + if last { + if w.first { + w.buf[w.i+6] = fullChunkType + } else { + w.buf[w.i+6] = lastChunkType + } + } else { + if w.first { + w.buf[w.i+6] = firstChunkType + } else { + w.buf[w.i+6] = middleChunkType + } + } + binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) + binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) +} + +// writeBlock writes the buffered block to the underlying writer, and reserves +// space for the next chunk's header. +func (w *Writer) writeBlock() { + _, w.err = w.w.Write(w.buf[w.written:]) + w.i = 0 + w.j = headerSize + w.written = 0 +} + +// writePending finishes the current journal and writes the buffer to the +// underlying writer. +func (w *Writer) writePending() { + if w.err != nil { + return + } + if w.pending { + w.fillHeader(true) + w.pending = false + } + _, w.err = w.w.Write(w.buf[w.written:w.j]) + w.written = w.j +} + +// Close finishes the current journal and closes the writer. +func (w *Writer) Close() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + w.err = errors.New("leveldb/journal: closed Writer") + return nil +} + +// Flush finishes the current journal, writes to the underlying writer, and +// flushes it if that writer implements interface{ Flush() error }. +func (w *Writer) Flush() error { + w.seq++ + w.writePending() + if w.err != nil { + return w.err + } + if w.f != nil { + w.err = w.f.Flush() + return w.err + } + return nil +} + +// Reset resets the journal writer, allows reuse of the journal writer. Reset +// will also closes the journal writer if not already. +func (w *Writer) Reset(writer io.Writer) (err error) { + w.seq++ + if w.err == nil { + w.writePending() + err = w.err + } + w.w = writer + w.f, _ = writer.(flusher) + w.i = 0 + w.j = 0 + w.written = 0 + w.first = false + w.pending = false + w.err = nil + return +} + +// Next returns a writer for the next journal. The writer returned becomes stale +// after the next Close, Flush or Next call, and should no longer be used. +func (w *Writer) Next() (io.Writer, error) { + w.seq++ + if w.err != nil { + return nil, w.err + } + if w.pending { + w.fillHeader(true) + } + w.i = w.j + w.j = w.j + headerSize + // Check if there is room in the block for the header. + if w.j > blockSize { + // Fill in the rest of the block with zeroes. + for k := w.i; k < blockSize; k++ { + w.buf[k] = 0 + } + w.writeBlock() + if w.err != nil { + return nil, w.err + } + } + w.first = true + w.pending = true + return singleWriter{w, w.seq}, nil +} + +type singleWriter struct { + w *Writer + seq int +} + +func (x singleWriter) Write(p []byte) (int, error) { + w := x.w + if w.seq != x.seq { + return 0, errors.New("leveldb/journal: stale writer") + } + if w.err != nil { + return 0, w.err + } + n0 := len(p) + for len(p) > 0 { + // Write a block, if it is full. + if w.j == blockSize { + w.fillHeader(false) + w.writeBlock() + if w.err != nil { + return 0, w.err + } + w.first = false + } + // Copy bytes into the buffer. 
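+		// copy may take only part of p when the block is nearly full; on
+		// the next pass the loop writes out the full block and continues
+		// with the remainder of p.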
+		n := copy(w.buf[w.j:], p)
+		w.j += n
+		p = p[n:]
+	}
+	return n0, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000000..ad8f51ec85
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
+	Ikey   []byte
+	Reason string
+}
+
+func (e *ErrInternalKeyCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type keyType uint
+
+func (kt keyType) String() string {
+	switch kt {
+	case keyTypeDel:
+		return "d"
+	case keyTypeVal:
+		return "v"
+	}
+	return fmt.Sprintf("<invalid:%#x>", uint(kt))
+}
+
+// Value types encoded as the last component of internal keys.
+// Do not modify; these values are saved to disk.
+const (
+	keyTypeDel = keyType(0)
+	keyTypeVal = keyType(1)
+)
+
+// keyTypeSeek defines the keyType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const keyTypeSeek = keyTypeVal
+
+const (
+	// Maximum value possible for a sequence number; the low 8 bits are
+	// used by the value type, so both can be packed together into a
+	// single 64-bit integer.
+	keyMaxSeq = (uint64(1) << 56) - 1
+	// Maximum value possible for a packed sequence number and type.
+	keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
+)
+
+// Maximum number encoded in bytes.
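+// keyMaxNumBytes holds keyMaxNum (the packed (seq<<8)|keyType encoding with
+// both fields at their maximum) in little-endian byte order; it is filled in
+// by init below.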
+var keyMaxNumBytes = make([]byte, 8)
+
+func init() {
+	binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
+}
+
+type internalKey []byte
+
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+	if seq > keyMaxSeq {
+		panic("leveldb: invalid sequence number")
+	} else if kt > keyTypeVal {
+		panic("leveldb: invalid type")
+	}
+
+	dst = ensureBuffer(dst, len(ukey)+8)
+	copy(dst, ukey)
+	binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
+	return internalKey(dst)
+}
+
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
+	if len(ik) < 8 {
+		return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
+	}
+	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+	seq, kt = uint64(num>>8), keyType(num&0xff)
+	if kt > keyTypeVal {
+		return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
+	}
+	ukey = ik[:len(ik)-8]
+	return
+}
+
+func validInternalKey(ik []byte) bool {
+	_, _, _, err := parseInternalKey(ik)
+	return err == nil
+}
+
+func (ik internalKey) assert() {
+	if ik == nil {
+		panic("leveldb: nil internalKey")
+	}
+	if len(ik) < 8 {
+		panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
+	}
+}
+
+func (ik internalKey) ukey() []byte {
+	ik.assert()
+	return ik[:len(ik)-8]
+}
+
+func (ik internalKey) num() uint64 {
+	ik.assert()
+	return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
+	num := ik.num()
+	seq, kt = uint64(num>>8), keyType(num&0xff)
+	if kt > keyTypeVal {
+		panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+	}
+	return
+}
+
+func (ik internalKey) String() string {
+	if ik == nil {
+		return "<nil>"
+	}
+
+	if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
+		return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+	}
+	return fmt.Sprintf("<invalid!%q>", []byte(ik))
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 0000000000..824e47f5f4
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,479 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides an in-memory key/value database implementation.
+package memdb
+
+import (
+	"math/rand"
+	"sync"
+
+	"github.com/syndtr/goleveldb/leveldb/comparer"
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var ( + ErrNotFound = errors.ErrNotFound + ErrIterReleased = errors.New("leveldb/memdb: iterator released") +) + +const tMaxHeight = 12 + +type dbIter struct { + util.BasicReleaser + p *DB + slice *util.Range + node int + forward bool + key, value []byte + err error +} + +func (i *dbIter) fill(checkStart, checkLimit bool) bool { + if i.node != 0 { + n := i.p.nodeData[i.node] + m := n + i.p.nodeData[i.node+nKey] + i.key = i.p.kvData[n:m] + if i.slice != nil { + switch { + case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: + fallthrough + case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: + i.node = 0 + goto bail + } + } + i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] + return true + } +bail: + i.key = nil + i.value = nil + return false +} + +func (i *dbIter) Valid() bool { + return i.node != 0 +} + +func (i *dbIter) First() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil { + i.node, _ = i.p.findGE(i.slice.Start, false) + } else { + i.node = i.p.nodeData[nNext] + } + return i.fill(false, true) +} + +func (i *dbIter) Last() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Limit != nil { + i.node = i.p.findLT(i.slice.Limit) + } else { + i.node = i.p.findLast() + } + return i.fill(true, false) +} + +func (i *dbIter) Seek(key []byte) bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { + key = i.slice.Start + } + i.node, _ = i.p.findGE(key, false) + return i.fill(false, true) +} + +func (i *dbIter) Next() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.node == 0 { + if !i.forward { + return i.First() + } + return false + } + i.forward = true + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.nodeData[i.node+nNext] + return i.fill(false, true) +} + +func (i *dbIter) Prev() bool { + if i.Released() { + i.err = ErrIterReleased + return false + } + + if i.node == 0 { + if i.forward { + return i.Last() + } + return false + } + i.forward = false + i.p.mu.RLock() + defer i.p.mu.RUnlock() + i.node = i.p.findLT(i.key) + return i.fill(true, false) +} + +func (i *dbIter) Key() []byte { + return i.key +} + +func (i *dbIter) Value() []byte { + return i.value +} + +func (i *dbIter) Error() error { return i.err } + +func (i *dbIter) Release() { + if !i.Released() { + i.p = nil + i.node = 0 + i.key = nil + i.value = nil + i.BasicReleaser.Release() + } +} + +const ( + nKV = iota + nKey + nVal + nHeight + nNext +) + +// DB is an in-memory key/value database. +type DB struct { + cmp comparer.BasicComparer + rnd *rand.Rand + + mu sync.RWMutex + kvData []byte + // Node data: + // [0] : KV offset + // [1] : Key length + // [2] : Value length + // [3] : Height + // [3..height] : Next nodes + nodeData []int + prevNode [tMaxHeight]int + maxHeight int + n int + kvSize int +} + +func (p *DB) randHeight() (h int) { + const branching = 4 + h = 1 + for h < tMaxHeight && p.rnd.Int()%branching == 0 { + h++ + } + return +} + +// Must hold RW-lock if prev == true, as it use shared prevNode slice. 
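+// findGE searches the skip list from the highest level down: at each level it
+// advances while the next node's key is less than the given key, and descends
+// a level otherwise. When prev is true the predecessor node at every level is
+// recorded in p.prevNode, which Put and Delete use to splice nodes in and out.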
+func (p *DB) findGE(key []byte, prev bool) (int, bool) { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + cmp := 1 + if next != 0 { + o := p.nodeData[next] + cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) + } + if cmp < 0 { + // Keep searching in this list + node = next + } else { + if prev { + p.prevNode[h] = node + } else if cmp == 0 { + return next, true + } + if h == 0 { + return next, cmp == 0 + } + h-- + } + } +} + +func (p *DB) findLT(key []byte) int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + o := p.nodeData[next] + if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +func (p *DB) findLast() int { + node := 0 + h := p.maxHeight - 1 + for { + next := p.nodeData[node+nNext+h] + if next == 0 { + if h == 0 { + break + } + h-- + } else { + node = next + } + } + return node +} + +// Put sets the value for the given key. It overwrites any previous value +// for that key; a DB is not a multi-map. +// +// It is safe to modify the contents of the arguments after Put returns. +func (p *DB) Put(key []byte, value []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + if node, exact := p.findGE(key, true); exact { + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + p.nodeData[node] = kvOffset + m := p.nodeData[node+nVal] + p.nodeData[node+nVal] = len(value) + p.kvSize += len(value) - m + return nil + } + + h := p.randHeight() + if h > p.maxHeight { + for i := p.maxHeight; i < h; i++ { + p.prevNode[i] = 0 + } + p.maxHeight = h + } + + kvOffset := len(p.kvData) + p.kvData = append(p.kvData, key...) + p.kvData = append(p.kvData, value...) + // Node + node := len(p.nodeData) + p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) + for i, n := range p.prevNode[:h] { + m := n + nNext + i + p.nodeData = append(p.nodeData, p.nodeData[m]) + p.nodeData[m] = node + } + + p.kvSize += len(key) + len(value) + p.n++ + return nil +} + +// Delete deletes the value for the given key. It returns ErrNotFound if +// the DB does not contain the key. +// +// It is safe to modify the contents of the arguments after Delete returns. +func (p *DB) Delete(key []byte) error { + p.mu.Lock() + defer p.mu.Unlock() + + node, exact := p.findGE(key, true) + if !exact { + return ErrNotFound + } + + h := p.nodeData[node+nHeight] + for i, n := range p.prevNode[:h] { + m := n + nNext + i + p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] + } + + p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] + p.n-- + return nil +} + +// Contains returns true if the given key are in the DB. +// +// It is safe to modify the contents of the arguments after Contains returns. +func (p *DB) Contains(key []byte) bool { + p.mu.RLock() + _, exact := p.findGE(key, false) + p.mu.RUnlock() + return exact +} + +// Get gets the value for the given key. It returns error.ErrNotFound if the +// DB does not contain the key. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Get returns. 
+func (p *DB) Get(key []byte) (value []byte, err error) { + p.mu.RLock() + if node, exact := p.findGE(key, false); exact { + o := p.nodeData[node] + p.nodeData[node+nKey] + value = p.kvData[o : o+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// +// The caller should not modify the contents of the returned slice, but +// it is safe to modify the contents of the argument after Find returns. +func (p *DB) Find(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node, _ := p.findGE(key, false); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +// NewIterator returns an iterator of the DB. +// The returned iterator is not safe for concurrent use, but it is safe to use +// multiple iterators concurrently, with each in a dedicated goroutine. +// It is also safe to use an iterator concurrently with modifying its +// underlying DB. However, the resultant key/value pairs are not guaranteed +// to be a consistent snapshot of the DB at a particular point in time. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// DB. And a nil Range.Limit is treated as a key after all keys in +// the DB. +// +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// +// The iterator must be released after use, by calling Release method. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { + return &dbIter{p: p, slice: slice} +} + +// Capacity returns keys/values buffer capacity. +func (p *DB) Capacity() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) +} + +// Size returns sum of keys and values length. Note that deleted +// key/value will not be accounted for, but it will still consume +// the buffer, since the buffer is append only. +func (p *DB) Size() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.kvSize +} + +// Free returns keys/values free buffer before need to grow. +func (p *DB) Free() int { + p.mu.RLock() + defer p.mu.RUnlock() + return cap(p.kvData) - len(p.kvData) +} + +// Len returns the number of entries in the DB. +func (p *DB) Len() int { + p.mu.RLock() + defer p.mu.RUnlock() + return p.n +} + +// Reset resets the DB to initial empty state. Allows reuse the buffer. +func (p *DB) Reset() { + p.mu.Lock() + p.rnd = rand.New(rand.NewSource(0xdeadbeef)) + p.maxHeight = 1 + p.n = 0 + p.kvSize = 0 + p.kvData = p.kvData[:0] + p.nodeData = p.nodeData[:nNext+tMaxHeight] + p.nodeData[nKV] = 0 + p.nodeData[nKey] = 0 + p.nodeData[nVal] = 0 + p.nodeData[nHeight] = tMaxHeight + for n := 0; n < tMaxHeight; n++ { + p.nodeData[nNext+n] = 0 + p.prevNode[n] = 0 + } + p.mu.Unlock() +} + +// New creates a new initialized in-memory key/value DB. The capacity +// is the initial key/value buffer capacity. The capacity is advisory, +// not enforced. +// +// This DB is append-only, deleting an entry would remove entry node but not +// reclaim KV buffer. +// +// The returned DB instance is safe for concurrent use. 
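+//
+// A minimal usage sketch (comparer.DefaultComparer orders keys the same way
+// as bytes.Compare):
+//
+//	db := memdb.New(comparer.DefaultComparer, 4096)
+//	if err := db.Put([]byte("k"), []byte("v")); err != nil {
+//		// handle err
+//	}
+//	value, err := db.Get([]byte("k")) // value must not be modified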
+func New(cmp comparer.BasicComparer, capacity int) *DB { + p := &DB{ + cmp: cmp, + rnd: rand.New(rand.NewSource(0xdeadbeef)), + maxHeight: 1, + kvData: make([]byte, 0, capacity), + nodeData: make([]int, 4+tMaxHeight), + } + p.nodeData[nHeight] = tMaxHeight + return p +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go new file mode 100644 index 0000000000..528b164233 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -0,0 +1,697 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package opt provides sets of options used by LevelDB. +package opt + +import ( + "math" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/filter" +) + +const ( + KiB = 1024 + MiB = KiB * 1024 + GiB = MiB * 1024 +) + +var ( + DefaultBlockCacher = LRUCacher + DefaultBlockCacheCapacity = 8 * MiB + DefaultBlockRestartInterval = 16 + DefaultBlockSize = 4 * KiB + DefaultCompactionExpandLimitFactor = 25 + DefaultCompactionGPOverlapsFactor = 10 + DefaultCompactionL0Trigger = 4 + DefaultCompactionSourceLimitFactor = 1 + DefaultCompactionTableSize = 2 * MiB + DefaultCompactionTableSizeMultiplier = 1.0 + DefaultCompactionTotalSize = 10 * MiB + DefaultCompactionTotalSizeMultiplier = 10.0 + DefaultCompressionType = SnappyCompression + DefaultIteratorSamplingRate = 1 * MiB + DefaultOpenFilesCacher = LRUCacher + DefaultOpenFilesCacheCapacity = 500 + DefaultWriteBuffer = 4 * MiB + DefaultWriteL0PauseTrigger = 12 + DefaultWriteL0SlowdownTrigger = 8 +) + +// Cacher is a caching algorithm. +type Cacher interface { + New(capacity int) cache.Cacher +} + +type CacherFunc struct { + NewFunc func(capacity int) cache.Cacher +} + +func (f *CacherFunc) New(capacity int) cache.Cacher { + if f.NewFunc != nil { + return f.NewFunc(capacity) + } + return nil +} + +func noCacher(int) cache.Cacher { return nil } + +var ( + // LRUCacher is the LRU-cache algorithm. + LRUCacher = &CacherFunc{cache.NewLRU} + + // NoCacher is the value to disable caching algorithm. + NoCacher = &CacherFunc{} +) + +// Compression is the 'sorted table' block compression algorithm to use. +type Compression uint + +func (c Compression) String() string { + switch c { + case DefaultCompression: + return "default" + case NoCompression: + return "none" + case SnappyCompression: + return "snappy" + } + return "invalid" +} + +const ( + DefaultCompression Compression = iota + NoCompression + SnappyCompression + nCompression +) + +// Strict is the DB 'strict level'. +type Strict uint + +const ( + // If present then a corrupted or invalid chunk or block in manifest + // journal will cause an error instead of being dropped. + // This will prevent database with corrupted manifest to be opened. + StrictManifest Strict = 1 << iota + + // If present then journal chunk checksum will be verified. + StrictJournalChecksum + + // If present then a corrupted or invalid chunk or block in journal + // will cause an error instead of being dropped. + // This will prevent database with corrupted journal to be opened. + StrictJournal + + // If present then 'sorted table' block checksum will be verified. + // This has effect on both 'read operation' and compaction. + StrictBlockChecksum + + // If present then a corrupted 'sorted table' will fails compaction. 
+ // The database will enter read-only mode. + StrictCompaction + + // If present then a corrupted 'sorted table' will halts 'read operation'. + StrictReader + + // If present then leveldb.Recover will drop corrupted 'sorted table'. + StrictRecovery + + // This only applicable for ReadOptions, if present then this ReadOptions + // 'strict level' will override global ones. + StrictOverride + + // StrictAll enables all strict flags. + StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery + + // DefaultStrict is the default strict flags. Specify any strict flags + // will override default strict flags as whole (i.e. not OR'ed). + DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader + + // NoStrict disables all strict flags. Override default strict flags. + NoStrict = ^StrictAll +) + +// Options holds the optional parameters for the DB at large. +type Options struct { + // AltFilters defines one or more 'alternative filters'. + // 'alternative filters' will be used during reads if a filter block + // does not match with the 'effective filter'. + // + // The default value is nil + AltFilters []filter.Filter + + // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. + // Specify NoCacher to disable caching algorithm. + // + // The default value is LRUCacher. + BlockCacher Cacher + + // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. + // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. + // + // The default value is 8MiB. + BlockCacheCapacity int + + // BlockCacheEvictRemoved allows enable forced-eviction on cached block belonging + // to removed 'sorted table'. + // + // The default if false. + BlockCacheEvictRemoved bool + + // BlockRestartInterval is the number of keys between restart points for + // delta encoding of keys. + // + // The default value is 16. + BlockRestartInterval int + + // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' + // block. + // + // The default value is 4KiB. + BlockSize int + + // CompactionExpandLimitFactor limits compaction size after expanded. + // This will be multiplied by table size limit at compaction target level. + // + // The default value is 25. + CompactionExpandLimitFactor int + + // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a + // single 'sorted table' generates. + // This will be multiplied by table size limit at grandparent level. + // + // The default value is 10. + CompactionGPOverlapsFactor int + + // CompactionL0Trigger defines number of 'sorted table' at level-0 that will + // trigger compaction. + // + // The default value is 4. + CompactionL0Trigger int + + // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to + // level-0. + // This will be multiplied by table size limit at compaction target level. + // + // The default value is 1. + CompactionSourceLimitFactor int + + // CompactionTableSize limits size of 'sorted table' that compaction generates. + // The limits for each level will be calculated as: + // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) + // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. + // + // The default value is 2MiB. + CompactionTableSize int + + // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. + // + // The default value is 1. 
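+	//
+	// For example, with CompactionTableSize = 2MiB and a multiplier of 2.0,
+	// 'sorted table' size at level-3 is limited to 2MiB * 2.0^3 = 16MiB.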
+ CompactionTableSizeMultiplier float64 + + // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for + // CompactionTableSize. + // Use zero to skip a level. + // + // The default value is nil. + CompactionTableSizeMultiplierPerLevel []float64 + + // CompactionTotalSize limits total size of 'sorted table' for each level. + // The limits for each level will be calculated as: + // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) + // The multiplier for each level can also fine-tuned using + // CompactionTotalSizeMultiplierPerLevel. + // + // The default value is 10MiB. + CompactionTotalSize int + + // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. + // + // The default value is 10. + CompactionTotalSizeMultiplier float64 + + // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for + // CompactionTotalSize. + // Use zero to skip a level. + // + // The default value is nil. + CompactionTotalSizeMultiplierPerLevel []float64 + + // Comparer defines a total ordering over the space of []byte keys: a 'less + // than' relationship. The same comparison algorithm must be used for reads + // and writes over the lifetime of the DB. + // + // The default value uses the same ordering as bytes.Compare. + Comparer comparer.Comparer + + // Compression defines the 'sorted table' block compression to use. + // + // The default value (DefaultCompression) uses snappy compression. + Compression Compression + + // DisableBufferPool allows disable use of util.BufferPool functionality. + // + // The default value is false. + DisableBufferPool bool + + // DisableBlockCache allows disable use of cache.Cache functionality on + // 'sorted table' block. + // + // The default value is false. + DisableBlockCache bool + + // DisableCompactionBackoff allows disable compaction retry backoff. + // + // The default value is false. + DisableCompactionBackoff bool + + // DisableLargeBatchTransaction allows disabling switch-to-transaction mode + // on large batch write. If enable batch writes large than WriteBuffer will + // use transaction. + // + // The default is false. + DisableLargeBatchTransaction bool + + // ErrorIfExist defines whether an error should returned if the DB already + // exist. + // + // The default value is false. + ErrorIfExist bool + + // ErrorIfMissing defines whether an error should returned if the DB is + // missing. If false then the database will be created if missing, otherwise + // an error will be returned. + // + // The default value is false. + ErrorIfMissing bool + + // Filter defines an 'effective filter' to use. An 'effective filter' + // if defined will be used to generate per-table filter block. + // The filter name will be stored on disk. + // During reads LevelDB will try to find matching filter from + // 'effective filter' and 'alternative filters'. + // + // Filter can be changed after a DB has been created. It is recommended + // to put old filter to the 'alternative filters' to mitigate lack of + // filter during transition period. + // + // A filter is used to reduce disk reads when looking for a specific key. + // + // The default value is nil. + Filter filter.Filter + + // IteratorSamplingRate defines approximate gap (in bytes) between read + // sampling of an iterator. The samples will be used to determine when + // compaction should be triggered. + // + // The default is 1MiB. + IteratorSamplingRate int + + // NoSync allows completely disable fsync. + // + // The default is false. 
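+	//
+	// Skipping fsync trades durability for write throughput: if the machine
+	// crashes, recently written entries may be lost even though the
+	// corresponding writes had already returned.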
+	NoSync bool
+
+	// NoWriteMerge allows disabling write merge.
+	//
+	// The default value is false.
+	NoWriteMerge bool
+
+	// OpenFilesCacher provides the cache algorithm for open-files caching.
+	// Specify NoCacher to disable the caching algorithm.
+	//
+	// The default value is LRUCacher.
+	OpenFilesCacher Cacher
+
+	// OpenFilesCacheCapacity defines the capacity of the open-files caching.
+	// Use -1 for zero; this has the same effect as specifying NoCacher to
+	// OpenFilesCacher.
+	//
+	// The default value is 500.
+	OpenFilesCacheCapacity int
+
+	// If true then the DB will be opened in read-only mode.
+	//
+	// The default value is false.
+	ReadOnly bool
+
+	// Strict defines the DB strict level.
+	Strict Strict
+
+	// WriteBuffer defines the maximum size of a 'memdb' before it is flushed to a
+	// 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
+	// unsorted journal.
+	//
+	// LevelDB may hold up to two 'memdb' at the same time.
+	//
+	// The default value is 4MiB.
+	WriteBuffer int
+
+	// WriteL0PauseTrigger defines the number of 'sorted table' at level-0 that
+	// will pause writes.
+	//
+	// The default value is 12.
+	WriteL0PauseTrigger int
+
+	// WriteL0SlowdownTrigger defines the number of 'sorted table' at level-0 that
+	// will trigger a write slowdown.
+	//
+	// The default value is 8.
+	WriteL0SlowdownTrigger int
+}
+
+func (o *Options) GetAltFilters() []filter.Filter {
+	if o == nil {
+		return nil
+	}
+	return o.AltFilters
+}
+
+func (o *Options) GetBlockCacher() Cacher {
+	if o == nil || o.BlockCacher == nil {
+		return DefaultBlockCacher
+	} else if o.BlockCacher == NoCacher {
+		return nil
+	}
+	return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+	if o == nil || o.BlockCacheCapacity == 0 {
+		return DefaultBlockCacheCapacity
+	} else if o.BlockCacheCapacity < 0 {
+		return 0
+	}
+	return o.BlockCacheCapacity
+}
+
+func (o *Options) GetBlockCacheEvictRemoved() bool {
+	if o == nil {
+		return false
+	}
+	return o.BlockCacheEvictRemoved
+}
+
+func (o *Options) GetBlockRestartInterval() int {
+	if o == nil || o.BlockRestartInterval <= 0 {
+		return DefaultBlockRestartInterval
+	}
+	return o.BlockRestartInterval
+}
+
+func (o *Options) GetBlockSize() int {
+	if o == nil || o.BlockSize <= 0 {
+		return DefaultBlockSize
+	}
+	return o.BlockSize
+}
+
+func (o *Options) GetCompactionExpandLimit(level int) int {
+	factor := DefaultCompactionExpandLimitFactor
+	if o != nil && o.CompactionExpandLimitFactor > 0 {
+		factor = o.CompactionExpandLimitFactor
+	}
+	return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+	factor := DefaultCompactionGPOverlapsFactor
+	if o != nil && o.CompactionGPOverlapsFactor > 0 {
+		factor = o.CompactionGPOverlapsFactor
+	}
+	return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+	if o == nil || o.CompactionL0Trigger == 0 {
+		return DefaultCompactionL0Trigger
+	}
+	return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+	factor := DefaultCompactionSourceLimitFactor
+	if o != nil && o.CompactionSourceLimitFactor > 0 {
+		factor = o.CompactionSourceLimitFactor
+	}
+	return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+	var (
+		base = DefaultCompactionTableSize
+		mult float64
+	)
+	if o != nil {
+		if o.CompactionTableSize > 0 {
+			base = o.CompactionTableSize
+		}
+		if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level]
> 0 { + mult = o.CompactionTableSizeMultiplierPerLevel[level] + } else if o.CompactionTableSizeMultiplier > 0 { + mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) + } + } + if mult == 0 { + mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) + } + return int(float64(base) * mult) +} + +func (o *Options) GetCompactionTotalSize(level int) int64 { + var ( + base = DefaultCompactionTotalSize + mult float64 + ) + if o != nil { + if o.CompactionTotalSize > 0 { + base = o.CompactionTotalSize + } + if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { + mult = o.CompactionTotalSizeMultiplierPerLevel[level] + } else if o.CompactionTotalSizeMultiplier > 0 { + mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) + } + } + if mult == 0 { + mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) + } + return int64(float64(base) * mult) +} + +func (o *Options) GetComparer() comparer.Comparer { + if o == nil || o.Comparer == nil { + return comparer.DefaultComparer + } + return o.Comparer +} + +func (o *Options) GetCompression() Compression { + if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { + return DefaultCompressionType + } + return o.Compression +} + +func (o *Options) GetDisableBufferPool() bool { + if o == nil { + return false + } + return o.DisableBufferPool +} + +func (o *Options) GetDisableBlockCache() bool { + if o == nil { + return false + } + return o.DisableBlockCache +} + +func (o *Options) GetDisableCompactionBackoff() bool { + if o == nil { + return false + } + return o.DisableCompactionBackoff +} + +func (o *Options) GetDisableLargeBatchTransaction() bool { + if o == nil { + return false + } + return o.DisableLargeBatchTransaction +} + +func (o *Options) GetErrorIfExist() bool { + if o == nil { + return false + } + return o.ErrorIfExist +} + +func (o *Options) GetErrorIfMissing() bool { + if o == nil { + return false + } + return o.ErrorIfMissing +} + +func (o *Options) GetFilter() filter.Filter { + if o == nil { + return nil + } + return o.Filter +} + +func (o *Options) GetIteratorSamplingRate() int { + if o == nil || o.IteratorSamplingRate <= 0 { + return DefaultIteratorSamplingRate + } + return o.IteratorSamplingRate +} + +func (o *Options) GetNoSync() bool { + if o == nil { + return false + } + return o.NoSync +} + +func (o *Options) GetNoWriteMerge() bool { + if o == nil { + return false + } + return o.NoWriteMerge +} + +func (o *Options) GetOpenFilesCacher() Cacher { + if o == nil || o.OpenFilesCacher == nil { + return DefaultOpenFilesCacher + } + if o.OpenFilesCacher == NoCacher { + return nil + } + return o.OpenFilesCacher +} + +func (o *Options) GetOpenFilesCacheCapacity() int { + if o == nil || o.OpenFilesCacheCapacity == 0 { + return DefaultOpenFilesCacheCapacity + } else if o.OpenFilesCacheCapacity < 0 { + return 0 + } + return o.OpenFilesCacheCapacity +} + +func (o *Options) GetReadOnly() bool { + if o == nil { + return false + } + return o.ReadOnly +} + +func (o *Options) GetStrict(strict Strict) bool { + if o == nil || o.Strict == 0 { + return DefaultStrict&strict != 0 + } + return o.Strict&strict != 0 +} + +func (o *Options) GetWriteBuffer() int { + if o == nil || o.WriteBuffer <= 0 { + return DefaultWriteBuffer + } + return o.WriteBuffer +} + +func (o *Options) GetWriteL0PauseTrigger() int { + if o == nil || o.WriteL0PauseTrigger == 0 { + return DefaultWriteL0PauseTrigger + } + return o.WriteL0PauseTrigger +} + +func 
(o *Options) GetWriteL0SlowdownTrigger() int {
+	if o == nil || o.WriteL0SlowdownTrigger == 0 {
+		return DefaultWriteL0SlowdownTrigger
+	}
+	return o.WriteL0SlowdownTrigger
+}
+
+// ReadOptions holds the optional parameters for 'read operation'. The
+// 'read operation' includes Get, Find and NewIterator.
+type ReadOptions struct {
+	// DontFillCache defines whether block reads for this 'read operation'
+	// should be cached. If false then the block will be cached. This does
+	// not affect already-cached blocks.
+	//
+	// The default value is false.
+	DontFillCache bool
+
+	// Strict will be OR'ed with the global DB 'strict level' unless StrictOverride
+	// is present. Currently only StrictReader has an effect here.
+	Strict Strict
+}
+
+func (ro *ReadOptions) GetDontFillCache() bool {
+	if ro == nil {
+		return false
+	}
+	return ro.DontFillCache
+}
+
+func (ro *ReadOptions) GetStrict(strict Strict) bool {
+	if ro == nil {
+		return false
+	}
+	return ro.Strict&strict != 0
+}
+
+// WriteOptions holds the optional parameters for 'write operation'. The
+// 'write operation' includes Write, Put and Delete.
+type WriteOptions struct {
+	// NoWriteMerge allows disabling write merge.
+	//
+	// The default value is false.
+	NoWriteMerge bool
+
+	// Sync is whether to sync underlying writes from the OS buffer cache
+	// through to actual disk, if applicable. Setting Sync can result in
+	// slower writes.
+	//
+	// If false, and the machine crashes, then some recent writes may be lost.
+	// Note that if it is just the process that crashes (and the machine does
+	// not) then no writes will be lost.
+	//
+	// In other words, Sync being false has the same semantics as a write
+	// system call. Sync being true means write followed by fsync.
+	//
+	// The default value is false.
+	Sync bool
+}
+
+func (wo *WriteOptions) GetNoWriteMerge() bool {
+	if wo == nil {
+		return false
+	}
+	return wo.NoWriteMerge
+}
+
+func (wo *WriteOptions) GetSync() bool {
+	if wo == nil {
+		return false
+	}
+	return wo.Sync
+}
+
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+	if ro.GetStrict(StrictOverride) {
+		return ro.GetStrict(strict)
+	} else {
+		return o.GetStrict(strict) || ro.GetStrict(strict)
+	}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
new file mode 100644
index 0000000000..b072b1ac4c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"github.com/syndtr/goleveldb/leveldb/filter"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func dupOptions(o *opt.Options) *opt.Options {
+	newo := &opt.Options{}
+	if o != nil {
+		*newo = *o
+	}
+	if newo.Strict == 0 {
+		newo.Strict = opt.DefaultStrict
+	}
+	return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+	no := dupOptions(o)
+	// Alternative filters.
+	if filters := o.GetAltFilters(); len(filters) > 0 {
+		no.AltFilters = make([]filter.Filter, len(filters))
+		for i, filter := range filters {
+			no.AltFilters[i] = &iFilter{filter}
+		}
+	}
+	// Comparer.
+	s.icmp = &iComparer{o.GetComparer()}
+	no.Comparer = s.icmp
+	// Filter.
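Since the vendored opt package above is the user-facing surface of this new dependency, a brief sketch of its nil-safe accessor pattern may help reviewers. This is hypothetical caller code, not part of the diff; the values simply restate the documented defaults:

	var o *opt.Options // nil is a valid receiver for every getter
	_ = o.GetBlockSize()           // 4096: the documented 4KiB default
	_ = o.GetWriteBuffer()         // the documented 4MiB default
	_ = o.GetCompactionL0Trigger() // 4, per the doc comment above

	// Per-read strictness: with StrictOverride set, the ReadOptions flags
	// replace the DB-level flags instead of being OR'ed with them.
	ro := &opt.ReadOptions{Strict: opt.StrictOverride | opt.StrictReader}
	_ = opt.GetStrict(o, ro, opt.StrictReader) // true regardless of o

The vendored leveldb/options.go resumes below, wrapping the user filter that the '// Filter.' comment above introduces, just as it wrapped the alternative filters and comparer.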
+ if filter := o.GetFilter(); filter != nil { + no.Filter = &iFilter{filter} + } + + s.o = &cachedOptions{Options: no} + s.o.cache() +} + +const optCachedLevel = 7 + +type cachedOptions struct { + *opt.Options + + compactionExpandLimit []int + compactionGPOverlaps []int + compactionSourceLimit []int + compactionTableSize []int + compactionTotalSize []int64 +} + +func (co *cachedOptions) cache() { + co.compactionExpandLimit = make([]int, optCachedLevel) + co.compactionGPOverlaps = make([]int, optCachedLevel) + co.compactionSourceLimit = make([]int, optCachedLevel) + co.compactionTableSize = make([]int, optCachedLevel) + co.compactionTotalSize = make([]int64, optCachedLevel) + + for level := 0; level < optCachedLevel; level++ { + co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) + co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) + co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) + co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) + co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) + } +} + +func (co *cachedOptions) GetCompactionExpandLimit(level int) int { + if level < optCachedLevel { + return co.compactionExpandLimit[level] + } + return co.Options.GetCompactionExpandLimit(level) +} + +func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { + if level < optCachedLevel { + return co.compactionGPOverlaps[level] + } + return co.Options.GetCompactionGPOverlaps(level) +} + +func (co *cachedOptions) GetCompactionSourceLimit(level int) int { + if level < optCachedLevel { + return co.compactionSourceLimit[level] + } + return co.Options.GetCompactionSourceLimit(level) +} + +func (co *cachedOptions) GetCompactionTableSize(level int) int { + if level < optCachedLevel { + return co.compactionTableSize[level] + } + return co.Options.GetCompactionTableSize(level) +} + +func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { + if level < optCachedLevel { + return co.compactionTotalSize[level] + } + return co.Options.GetCompactionTotalSize(level) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go new file mode 100644 index 0000000000..3f391f9346 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go @@ -0,0 +1,210 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "io" + "os" + "sync" + + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/journal" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +// ErrManifestCorrupted records manifest corruption. This error will be +// wrapped with errors.ErrCorrupted. +type ErrManifestCorrupted struct { + Field string + Reason string +} + +func (e *ErrManifestCorrupted) Error() string { + return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) +} + +func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error { + return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason}) +} + +// session represent a persistent database session. +type session struct { + // Need 64-bit alignment. 
+ stNextFileNum int64 // current unused file number + stJournalNum int64 // current journal file number; need external synchronization + stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb + stTempFileNum int64 + stSeqNum uint64 // last mem compacted seq; need external synchronization + + stor *iStorage + storLock storage.Locker + o *cachedOptions + icmp *iComparer + tops *tOps + fileRef map[int64]int + + manifest *journal.Writer + manifestWriter storage.Writer + manifestFd storage.FileDesc + + stCompPtrs []internalKey // compaction pointers; need external synchronization + stVersion *version // current version + vmu sync.Mutex +} + +// Creates new initialized session instance. +func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { + if stor == nil { + return nil, os.ErrInvalid + } + storLock, err := stor.Lock() + if err != nil { + return + } + s = &session{ + stor: newIStorage(stor), + storLock: storLock, + fileRef: make(map[int64]int), + } + s.setOptions(o) + s.tops = newTableOps(s) + s.setVersion(newVersion(s)) + s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") + return +} + +// Close session. +func (s *session) close() { + s.tops.close() + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + s.manifest = nil + s.manifestWriter = nil + s.setVersion(&version{s: s, closing: true}) +} + +// Release session lock. +func (s *session) release() { + s.storLock.Unlock() +} + +// Create a new database session; need external synchronization. +func (s *session) create() error { + // create manifest + return s.newManifest(nil, nil) +} + +// Recover a database session; need external synchronization. +func (s *session) recover() (err error) { + defer func() { + if os.IsNotExist(err) { + // Don't return os.ErrNotExist if the underlying storage contains + // other files that belong to LevelDB. So the DB won't get trashed. + if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 { + err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} + } + } + }() + + fd, err := s.stor.GetMeta() + if err != nil { + return + } + + reader, err := s.stor.Open(fd) + if err != nil { + return + } + defer reader.Close() + + var ( + // Options. 
+ strict = s.o.GetStrict(opt.StrictManifest) + + jr = journal.NewReader(reader, dropper{s, fd}, strict, true) + rec = &sessionRecord{} + staging = s.stVersion.newStaging() + ) + for { + var r io.Reader + r, err = jr.Next() + if err != nil { + if err == io.EOF { + err = nil + break + } + return errors.SetFd(err, fd) + } + + err = rec.decode(r) + if err == nil { + // save compact pointers + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) + } + // commit record to version staging + staging.commit(rec) + } else { + err = errors.SetFd(err, fd) + if strict || !errors.IsCorrupted(err) { + return + } + s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd)) + } + rec.resetCompPtrs() + rec.resetAddedTables() + rec.resetDeletedTables() + } + + switch { + case !rec.has(recComparer): + return newErrManifestCorrupted(fd, "comparer", "missing") + case rec.comparer != s.icmp.uName(): + return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) + case !rec.has(recNextFileNum): + return newErrManifestCorrupted(fd, "next-file-num", "missing") + case !rec.has(recJournalNum): + return newErrManifestCorrupted(fd, "journal-file-num", "missing") + case !rec.has(recSeqNum): + return newErrManifestCorrupted(fd, "seq-num", "missing") + } + + s.manifestFd = fd + s.setVersion(staging.finish()) + s.setNextFileNum(rec.nextFileNum) + s.recordCommited(rec) + return nil +} + +// Commit session; need external synchronization. +func (s *session) commit(r *sessionRecord) (err error) { + v := s.version() + defer v.release() + + // spawn new version based on current version + nv := v.spawn(r) + + if s.manifest == nil { + // manifest journal writer not yet created, create one + err = s.newManifest(r, nv) + } else { + err = s.flushManifest(r) + } + + // finally, apply new version if no error rise + if err == nil { + s.setVersion(nv) + } + + return +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go new file mode 100644 index 0000000000..089cd00b26 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go @@ -0,0 +1,302 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/memdb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int { + v := s.version() + defer v.release() + return v.pickMemdbLevel(umin, umax, maxLevel) +} + +func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) { + // Create sorted table. + iter := mdb.NewIterator(nil) + defer iter.Release() + t, n, err := s.tops.createFrom(iter) + if err != nil { + return 0, err + } + + // Pick level other than zero can cause compaction issue with large + // bulk insert and delete on strictly incrementing key-space. The + // problem is that the small deletion markers trapped at lower level, + // while key/value entries keep growing at higher level. Since the + // key-space is strictly incrementing it will not overlaps with + // higher level, thus maximum possible level is always picked, while + // overlapping deletion marker pushed into lower level. 
+ // See: https://github.com/syndtr/goleveldb/issues/127. + flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) + rec.addTableFile(flushLevel, t) + + s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + return flushLevel, nil +} + +// Pick a compaction based on current state; need external synchronization. +func (s *session) pickCompaction() *compaction { + v := s.version() + + var sourceLevel int + var t0 tFiles + if v.cScore >= 1 { + sourceLevel = v.cLevel + cptr := s.getCompPtr(sourceLevel) + tables := v.levels[sourceLevel] + for _, t := range tables { + if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { + t0 = append(t0, t) + break + } + } + if len(t0) == 0 { + t0 = append(t0, tables[0]) + } + } else { + if p := atomic.LoadPointer(&v.cSeek); p != nil { + ts := (*tSet)(p) + sourceLevel = ts.level + t0 = append(t0, ts.table) + } else { + v.release() + return nil + } + } + + return newCompaction(s, v, sourceLevel, t0) +} + +// Create compaction from given level and range; need external synchronization. +func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction { + v := s.version() + + if sourceLevel >= len(v.levels) { + v.release() + return nil + } + + t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0) + if len(t0) == 0 { + v.release() + return nil + } + + // Avoid compacting too much in one shot in case the range is large. + // But we cannot do this for level-0 since level-0 files can overlap + // and we must not pick one file and drop another older file if the + // two files overlap. + if !noLimit && sourceLevel > 0 { + limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel)) + total := int64(0) + for i, t := range t0 { + total += t.size + if total >= limit { + s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) + t0 = t0[:i+1] + break + } + } + } + + return newCompaction(s, v, sourceLevel, t0) +} + +func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction { + c := &compaction{ + s: s, + v: v, + sourceLevel: sourceLevel, + levels: [2]tFiles{t0, nil}, + maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)), + tPtrs: make([]int, len(v.levels)), + } + c.expand() + c.save() + return c +} + +// compaction represent a compaction state. +type compaction struct { + s *session + v *version + + sourceLevel int + levels [2]tFiles + maxGPOverlaps int64 + + gp tFiles + gpi int + seenKey bool + gpOverlappedBytes int64 + imin, imax internalKey + tPtrs []int + released bool + + snapGPI int + snapSeenKey bool + snapGPOverlappedBytes int64 + snapTPtrs []int +} + +func (c *compaction) save() { + c.snapGPI = c.gpi + c.snapSeenKey = c.seenKey + c.snapGPOverlappedBytes = c.gpOverlappedBytes + c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) +} + +func (c *compaction) restore() { + c.gpi = c.snapGPI + c.seenKey = c.snapSeenKey + c.gpOverlappedBytes = c.snapGPOverlappedBytes + c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) +} + +func (c *compaction) release() { + if !c.released { + c.released = true + c.v.release() + } +} + +// Expand compacted tables; need external synchronization. 
+func (c *compaction) expand() { + limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel)) + vt0 := c.v.levels[c.sourceLevel] + vt1 := tFiles{} + if level := c.sourceLevel + 1; level < len(c.v.levels) { + vt1 = c.v.levels[level] + } + + t0, t1 := c.levels[0], c.levels[1] + imin, imax := t0.getRange(c.s.icmp) + // We expand t0 here just incase ukey hop across tables. + t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0) + if len(t0) != len(c.levels[0]) { + imin, imax = t0.getRange(c.s.icmp) + } + t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) + // Get entire range covered by compaction. + amin, amax := append(t0, t1...).getRange(c.s.icmp) + + // See if we can grow the number of inputs in "sourceLevel" without + // changing the number of "sourceLevel+1" files we pick up. + if len(t1) > 0 { + exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0) + if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { + xmin, xmax := exp0.getRange(c.s.icmp) + exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) + if len(exp1) == len(t1) { + c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", + c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), + len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) + imin, imax = xmin, xmax + t0, t1 = exp0, exp1 + amin, amax = append(t0, t1...).getRange(c.s.icmp) + } + } + } + + // Compute the set of grandparent files that overlap this compaction + // (parent == sourceLevel+1; grandparent == sourceLevel+2) + if level := c.sourceLevel + 2; level < len(c.v.levels) { + c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) + } + + c.levels[0], c.levels[1] = t0, t1 + c.imin, c.imax = imin, imax +} + +// Check whether compaction is trivial. +func (c *compaction) trivial() bool { + return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps +} + +func (c *compaction) baseLevelForKey(ukey []byte) bool { + for level := c.sourceLevel + 2; level < len(c.v.levels); level++ { + tables := c.v.levels[level] + for c.tPtrs[level] < len(tables) { + t := tables[c.tPtrs[level]] + if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { + // We've advanced far enough. + if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + // Key falls in this file's range, so definitely not base level. + return false + } + break + } + c.tPtrs[level]++ + } + } + return true +} + +func (c *compaction) shouldStopBefore(ikey internalKey) bool { + for ; c.gpi < len(c.gp); c.gpi++ { + gp := c.gp[c.gpi] + if c.s.icmp.Compare(ikey, gp.imax) <= 0 { + break + } + if c.seenKey { + c.gpOverlappedBytes += gp.size + } + } + c.seenKey = true + + if c.gpOverlappedBytes > c.maxGPOverlaps { + // Too much overlap for current output; start new output. + c.gpOverlappedBytes = 0 + return true + } + return false +} + +// Creates an iterator. +func (c *compaction) newIterator() iterator.Iterator { + // Creates iterator slice. + icap := len(c.levels) + if c.sourceLevel == 0 { + // Special case for level-0. + icap = len(c.levels[0]) + 1 + } + its := make([]iterator.Iterator, 0, icap) + + // Options. 
+	ro := &opt.ReadOptions{
+		DontFillCache: true,
+		Strict:        opt.StrictOverride,
+	}
+	strict := c.s.o.GetStrict(opt.StrictCompaction)
+	if strict {
+		ro.Strict |= opt.StrictReader
+	}
+
+	for i, tables := range c.levels {
+		if len(tables) == 0 {
+			continue
+		}
+
+		// Level-0 is not sorted and tables may overlap each other.
+		if c.sourceLevel+i == 0 {
+			for _, t := range tables {
+				its = append(its, c.s.tops.newIterator(t, nil, ro))
+			}
+		} else {
+			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+			its = append(its, it)
+		}
+	}
+
+	return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 0000000000..854e1aa6f9
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,323 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"strings"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+type byteReader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// These numbers are written to disk and should not be changed.
+const (
+	recComparer    = 1
+	recJournalNum  = 2
+	recNextFileNum = 3
+	recSeqNum      = 4
+	recCompPtr     = 5
+	recDelTable    = 6
+	recAddTable    = 7
+	// 8 was used for large value refs
+	recPrevJournalNum = 9
+)
+
+type cpRecord struct {
+	level int
+	ikey  internalKey
+}
+
+type atRecord struct {
+	level int
+	num   int64
+	size  int64
+	imin  internalKey
+	imax  internalKey
+}
+
+type dtRecord struct {
+	level int
+	num   int64
+}
+
+type sessionRecord struct {
+	hasRec         int
+	comparer       string
+	journalNum     int64
+	prevJournalNum int64
+	nextFileNum    int64
+	seqNum         uint64
+	compPtrs       []cpRecord
+	addedTables    []atRecord
+	deletedTables  []dtRecord
+
+	scratch [binary.MaxVarintLen64]byte
+	err     error
+}
+
+func (p *sessionRecord) has(rec int) bool {
+	return p.hasRec&(1<<uint(rec)) != 0
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/syndtr/goleveldb/leveldb/journal"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+type dropper struct {
+	s  *session
+	fd storage.FileDesc
+}
+
+func (d dropper) Drop(err error) {
+	if e, ok := err.(*journal.ErrCorrupted); ok {
+		d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
+	} else {
+		d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
+	}
+}
+
+func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+func (s *session) newTemp() storage.FileDesc {
+	num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
+	return storage.FileDesc{Type: storage.TypeTemp, Num: num}
+}
+
+func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
+	ref += s.fileRef[fd.Num]
+	if ref > 0 {
+		s.fileRef[fd.Num] = ref
+	} else if ref == 0 {
+		delete(s.fileRef, fd.Num)
+	} else {
+		panic(fmt.Sprintf("negative ref: %v", fd))
+	}
+	return ref
+}
+
+// Session state.
+
+// Get the current version. This will increment the version's ref count; the
+// caller must call version.release (exactly once) after use.
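The comment above spells out the refcount contract for session versions. Every caller in this diff (pickCompaction, commit, newManifest) follows the same discipline, sketched here as illustrative caller code rather than vendored source:

	v := s.version()  // increments the version's reference count
	defer v.release() // must run exactly once, or table files are retained or freed incorrectly
	// ... read the level tables held by v ...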
+func (s *session) version() *version { + s.vmu.Lock() + defer s.vmu.Unlock() + s.stVersion.incref() + return s.stVersion +} + +func (s *session) tLen(level int) int { + s.vmu.Lock() + defer s.vmu.Unlock() + return s.stVersion.tLen(level) +} + +// Set current version to v. +func (s *session) setVersion(v *version) { + s.vmu.Lock() + defer s.vmu.Unlock() + // Hold by session. It is important to call this first before releasing + // current version, otherwise the still used files might get released. + v.incref() + if s.stVersion != nil { + // Release current version. + s.stVersion.releaseNB() + } + s.stVersion = v +} + +// Get current unused file number. +func (s *session) nextFileNum() int64 { + return atomic.LoadInt64(&s.stNextFileNum) +} + +// Set current unused file number to num. +func (s *session) setNextFileNum(num int64) { + atomic.StoreInt64(&s.stNextFileNum, num) +} + +// Mark file number as used. +func (s *session) markFileNum(num int64) { + nextFileNum := num + 1 + for { + old, x := s.stNextFileNum, nextFileNum + if old > x { + x = old + } + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { + break + } + } +} + +// Allocate a file number. +func (s *session) allocFileNum() int64 { + return atomic.AddInt64(&s.stNextFileNum, 1) - 1 +} + +// Reuse given file number. +func (s *session) reuseFileNum(num int64) { + for { + old, x := s.stNextFileNum, num + if old != x+1 { + x = old + } + if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) { + break + } + } +} + +// Set compaction ptr at given level; need external synchronization. +func (s *session) setCompPtr(level int, ik internalKey) { + if level >= len(s.stCompPtrs) { + newCompPtrs := make([]internalKey, level+1) + copy(newCompPtrs, s.stCompPtrs) + s.stCompPtrs = newCompPtrs + } + s.stCompPtrs[level] = append(internalKey{}, ik...) +} + +// Get compaction ptr at given level; need external synchronization. +func (s *session) getCompPtr(level int) internalKey { + if level >= len(s.stCompPtrs) { + return nil + } + return s.stCompPtrs[level] +} + +// Manifest related utils. + +// Fill given session record obj with current states; need external +// synchronization. +func (s *session) fillRecord(r *sessionRecord, snapshot bool) { + r.setNextFileNum(s.nextFileNum()) + + if snapshot { + if !r.has(recJournalNum) { + r.setJournalNum(s.stJournalNum) + } + + if !r.has(recSeqNum) { + r.setSeqNum(s.stSeqNum) + } + + for level, ik := range s.stCompPtrs { + if ik != nil { + r.addCompPtr(level, ik) + } + } + + r.setComparer(s.icmp.uName()) + } +} + +// Mark if record has been committed, this will update session state; +// need external synchronization. +func (s *session) recordCommited(rec *sessionRecord) { + if rec.has(recJournalNum) { + s.stJournalNum = rec.journalNum + } + + if rec.has(recPrevJournalNum) { + s.stPrevJournalNum = rec.prevJournalNum + } + + if rec.has(recSeqNum) { + s.stSeqNum = rec.seqNum + } + + for _, r := range rec.compPtrs { + s.setCompPtr(r.level, internalKey(r.ikey)) + } +} + +// Create a new manifest file; need external synchronization. 
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { + fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()} + writer, err := s.stor.Create(fd) + if err != nil { + return + } + jw := journal.NewWriter(writer) + + if v == nil { + v = s.version() + defer v.release() + } + if rec == nil { + rec = &sessionRecord{} + } + s.fillRecord(rec, true) + v.fillRecord(rec) + + defer func() { + if err == nil { + s.recordCommited(rec) + if s.manifest != nil { + s.manifest.Close() + } + if s.manifestWriter != nil { + s.manifestWriter.Close() + } + if !s.manifestFd.Zero() { + s.stor.Remove(s.manifestFd) + } + s.manifestFd = fd + s.manifestWriter = writer + s.manifest = jw + } else { + writer.Close() + s.stor.Remove(fd) + s.reuseFileNum(fd.Num) + } + }() + + w, err := jw.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = jw.Flush() + if err != nil { + return + } + err = s.stor.SetMeta(fd) + return +} + +// Flush record to disk. +func (s *session) flushManifest(rec *sessionRecord) (err error) { + s.fillRecord(rec, false) + w, err := s.manifest.Next() + if err != nil { + return + } + err = rec.encode(w) + if err != nil { + return + } + err = s.manifest.Flush() + if err != nil { + return + } + if !s.o.GetNoSync() { + err = s.manifestWriter.Sync() + if err != nil { + return + } + } + s.recordCommited(rec) + return +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go new file mode 100644 index 0000000000..d45fb5dfeb --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go @@ -0,0 +1,63 @@ +package leveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/storage" + "sync/atomic" +) + +type iStorage struct { + storage.Storage + read uint64 + write uint64 +} + +func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) { + r, err := c.Storage.Open(fd) + return &iStorageReader{r, c}, err +} + +func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) { + w, err := c.Storage.Create(fd) + return &iStorageWriter{w, c}, err +} + +func (c *iStorage) reads() uint64 { + return atomic.LoadUint64(&c.read) +} + +func (c *iStorage) writes() uint64 { + return atomic.LoadUint64(&c.write) +} + +// newIStorage returns the given storage wrapped by iStorage. +func newIStorage(s storage.Storage) *iStorage { + return &iStorage{s, 0, 0} +} + +type iStorageReader struct { + storage.Reader + c *iStorage +} + +func (r *iStorageReader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) { + n, err = r.Reader.ReadAt(p, off) + atomic.AddUint64(&r.c.read, uint64(n)) + return n, err +} + +type iStorageWriter struct { + storage.Writer + c *iStorage +} + +func (w *iStorageWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + atomic.AddUint64(&w.c.write, uint64(n)) + return n, err +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go new file mode 100644 index 0000000000..9ba71fd6d1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -0,0 +1,671 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reservefs. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package storage + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +var ( + errFileOpen = errors.New("leveldb/storage: file still open") + errReadOnly = errors.New("leveldb/storage: storage is read-only") +) + +type fileLock interface { + release() error +} + +type fileStorageLock struct { + fs *fileStorage +} + +func (lock *fileStorageLock) Unlock() { + if lock.fs != nil { + lock.fs.mu.Lock() + defer lock.fs.mu.Unlock() + if lock.fs.slock == lock { + lock.fs.slock = nil + } + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func writeFileSynced(filename string, data []byte, perm os.FileMode) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Sync(); err == nil { + err = err1 + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +const logSizeThreshold = 1024 * 1024 // 1 MiB + +// fileStorage is a file-system backed storage. +type fileStorage struct { + path string + readOnly bool + + mu sync.Mutex + flock fileLock + slock *fileStorageLock + logw *os.File + logSize int64 + buf []byte + // Opened file counter; if open < 0 means closed. + open int + day int +} + +// OpenFile returns a new filesystem-backed storage implementation with the given +// path. This also acquire a file lock, so any subsequent attempt to open the +// same path will fail. +// +// The storage must be closed after use, by calling Close method. +func OpenFile(path string, readOnly bool) (Storage, error) { + if fi, err := os.Stat(path); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path) + } + } else if os.IsNotExist(err) && !readOnly { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + } else { + return nil, err + } + + flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + flock.release() + } + }() + + var ( + logw *os.File + logSize int64 + ) + if !readOnly { + logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + logSize, err = logw.Seek(0, os.SEEK_END) + if err != nil { + logw.Close() + return nil, err + } + } + + fs := &fileStorage{ + path: path, + readOnly: readOnly, + flock: flock, + logw: logw, + logSize: logSize, + } + runtime.SetFinalizer(fs, (*fileStorage).Close) + return fs, nil +} + +func (fs *fileStorage) Lock() (Locker, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + if fs.readOnly { + return &fileStorageLock{}, nil + } + if fs.slock != nil { + return nil, ErrLocked + } + fs.slock = &fileStorageLock{fs: fs} + return fs.slock, nil +} + +func itoa(buf []byte, i int, wid int) []byte { + u := uint(i) + if u == 0 && wid <= 1 { + return append(buf, '0') + } + + // Assemble decimal in reverse order. + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + return append(buf, b[bp:]...) 
+} + +func (fs *fileStorage) printDay(t time.Time) { + if fs.day == t.Day() { + return + } + fs.day = t.Day() + fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) +} + +func (fs *fileStorage) doLog(t time.Time, str string) { + if fs.logSize > logSizeThreshold { + // Rotate log file. + fs.logw.Close() + fs.logw = nil + fs.logSize = 0 + rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) + } + if fs.logw == nil { + var err error + fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return + } + // Force printDay on new log file. + fs.day = 0 + } + fs.printDay(t) + hour, min, sec := t.Clock() + msec := t.Nanosecond() / 1e3 + // time + fs.buf = itoa(fs.buf[:0], hour, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, min, 2) + fs.buf = append(fs.buf, ':') + fs.buf = itoa(fs.buf, sec, 2) + fs.buf = append(fs.buf, '.') + fs.buf = itoa(fs.buf, msec, 6) + fs.buf = append(fs.buf, ' ') + // write + fs.buf = append(fs.buf, []byte(str)...) + fs.buf = append(fs.buf, '\n') + n, _ := fs.logw.Write(fs.buf) + fs.logSize += int64(n) +} + +func (fs *fileStorage) Log(str string) { + if !fs.readOnly { + t := time.Now() + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return + } + fs.doLog(t, str) + } +} + +func (fs *fileStorage) log(str string) { + if !fs.readOnly { + fs.doLog(time.Now(), str) + } +} + +func (fs *fileStorage) setMeta(fd FileDesc) error { + content := fsGenName(fd) + "\n" + // Check and backup old CURRENT file. + currentPath := filepath.Join(fs.path, "CURRENT") + if _, err := os.Stat(currentPath); err == nil { + b, err := ioutil.ReadFile(currentPath) + if err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + if string(b) == content { + // Content not changed, do nothing. + return nil + } + if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil { + fs.log(fmt.Sprintf("backup CURRENT: %v", err)) + return err + } + } else if !os.IsNotExist(err) { + return err + } + path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num) + if err := writeFileSynced(path, []byte(content), 0644); err != nil { + fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err)) + return err + } + // Replace CURRENT file. + if err := rename(path, currentPath); err != nil { + fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err)) + return err + } + // Sync root directory. + if err := syncDir(fs.path); err != nil { + fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + return nil +} + +func (fs *fileStorage) SetMeta(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return fs.setMeta(fd) +} + +func (fs *fileStorage) GetMeta() (FileDesc, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return FileDesc{}, ErrClosed + } + dir, err := os.Open(fs.path) + if err != nil { + return FileDesc{}, err + } + names, err := dir.Readdirnames(0) + // Close the dir first before checking for Readdirnames error. + if ce := dir.Close(); ce != nil { + fs.log(fmt.Sprintf("close dir: %v", ce)) + } + if err != nil { + return FileDesc{}, err + } + // Try this in order: + // - CURRENT.[0-9]+ ('pending rename' file, descending order) + // - CURRENT + // - CURRENT.bak + // + // Skip corrupted file or file that point to a missing target file. 
+	type currentFile struct {
+		name string
+		fd   FileDesc
+	}
+	tryCurrent := func(name string) (*currentFile, error) {
+		b, err := ioutil.ReadFile(filepath.Join(fs.path, name))
+		if err != nil {
+			if os.IsNotExist(err) {
+				err = os.ErrNotExist
+			}
+			return nil, err
+		}
+		var fd FileDesc
+		if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) {
+			fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b))
+			err := &ErrCorrupted{
+				Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"),
+			}
+			return nil, err
+		}
+		if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil {
+			if os.IsNotExist(err) {
+				fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd))
+				err = os.ErrNotExist
+			}
+			return nil, err
+		}
+		return &currentFile{name: name, fd: fd}, nil
+	}
+	tryCurrents := func(names []string) (*currentFile, error) {
+		var (
+			cur *currentFile
+			// Last corruption error.
+			lastCerr error
+		)
+		for _, name := range names {
+			var err error
+			cur, err = tryCurrent(name)
+			if err == nil {
+				break
+			} else if err == os.ErrNotExist {
+				// Fallback to the next file.
+			} else if isCorrupted(err) {
+				lastCerr = err
+				// Fallback to the next file.
+			} else {
+				// In case the error is due to permission, etc.
+				return nil, err
+			}
+		}
+		if cur == nil {
+			err := os.ErrNotExist
+			if lastCerr != nil {
+				err = lastCerr
+			}
+			return nil, err
+		}
+		return cur, nil
+	}
+
+	// Try 'pending rename' files.
+	var nums []int64
+	for _, name := range names {
+		if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
+			i, err := strconv.ParseInt(name[8:], 10, 64)
+			if err == nil {
+				nums = append(nums, i)
+			}
+		}
+	}
+	var (
+		pendCur   *currentFile
+		pendErr   = os.ErrNotExist
+		pendNames []string
+	)
+	if len(nums) > 0 {
+		sort.Sort(sort.Reverse(int64Slice(nums)))
+		pendNames = make([]string, len(nums))
+		for i, num := range nums {
+			pendNames[i] = fmt.Sprintf("CURRENT.%d", num)
+		}
+		pendCur, pendErr = tryCurrents(pendNames)
+		if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) {
+			return FileDesc{}, pendErr
+		}
+	}
+
+	// Try CURRENT and CURRENT.bak.
+	curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"})
+	if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) {
+		return FileDesc{}, curErr
+	}
+
+	// pendCur takes precedence, but guards against obsolete pendCur.
+	if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) {
+		curCur = pendCur
+	}
+
+	if curCur != nil {
+		// Restore CURRENT file to proper state.
+		if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) {
+			// Ignore setMeta errors, however don't delete obsolete files if we
+			// catch error.
+			if err := fs.setMeta(curCur.fd); err == nil {
+				// Remove 'pending rename' files.
+				for _, name := range pendNames {
+					if err := os.Remove(filepath.Join(fs.path, name)); err != nil {
+						fs.log(fmt.Sprintf("remove %s: %v", name, err))
+					}
+				}
+			}
+		}
+		return curCur.fd, nil
+	}
+
+	// Nothing found.
+	if isCorrupted(pendErr) {
+		return FileDesc{}, pendErr
+	}
+	return FileDesc{}, curErr
+}
+
+func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.open < 0 {
+		return nil, ErrClosed
+	}
+	dir, err := os.Open(fs.path)
+	if err != nil {
+		return
+	}
+	names, err := dir.Readdirnames(0)
+	// Close the dir first before checking for Readdirnames error.
+ if cerr := dir.Close(); cerr != nil { + fs.log(fmt.Sprintf("close dir: %v", cerr)) + } + if err == nil { + for _, name := range names { + if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 { + fds = append(fds, fd) + } + } + } + return +} + +func (fs *fileStorage) Open(fd FileDesc) (Reader, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0) + if err == nil { + goto ok + } + } + return nil, err + } +ok: + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Create(fd FileDesc) (Writer, error) { + if !FileDescOk(fd) { + return nil, ErrInvalidFile + } + if fs.readOnly { + return nil, errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return nil, ErrClosed + } + of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, err + } + fs.open++ + return &fileWrap{File: of, fs: fs, fd: fd}, nil +} + +func (fs *fileStorage) Remove(fd FileDesc) error { + if !FileDescOk(fd) { + return ErrInvalidFile + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + err := os.Remove(filepath.Join(fs.path, fsGenName(fd))) + if err != nil { + if fsHasOldName(fd) && os.IsNotExist(err) { + if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) { + fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err)) + err = e1 + } + } else { + fs.log(fmt.Sprintf("remove %s: %v", fd, err)) + } + } + return err +} + +func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error { + if !FileDescOk(oldfd) || !FileDescOk(newfd) { + return ErrInvalidFile + } + if oldfd == newfd { + return nil + } + if fs.readOnly { + return errReadOnly + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd))) +} + +func (fs *fileStorage) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.open < 0 { + return ErrClosed + } + // Clear the finalizer. + runtime.SetFinalizer(fs, nil) + + if fs.open > 0 { + fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open)) + } + fs.open = -1 + if fs.logw != nil { + fs.logw.Close() + } + return fs.flock.release() +} + +type fileWrap struct { + *os.File + fs *fileStorage + fd FileDesc + closed bool +} + +func (fw *fileWrap) Sync() error { + if err := fw.File.Sync(); err != nil { + return err + } + if fw.fd.Type == TypeManifest { + // Also sync parent directory if file type is manifest. + // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
+ if err := syncDir(fw.fs.path); err != nil { + fw.fs.log(fmt.Sprintf("syncDir: %v", err)) + return err + } + } + return nil +} + +func (fw *fileWrap) Close() error { + fw.fs.mu.Lock() + defer fw.fs.mu.Unlock() + if fw.closed { + return ErrClosed + } + fw.closed = true + fw.fs.open-- + err := fw.File.Close() + if err != nil { + fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err)) + } + return err +} + +func fsGenName(fd FileDesc) string { + switch fd.Type { + case TypeManifest: + return fmt.Sprintf("MANIFEST-%06d", fd.Num) + case TypeJournal: + return fmt.Sprintf("%06d.log", fd.Num) + case TypeTable: + return fmt.Sprintf("%06d.ldb", fd.Num) + case TypeTemp: + return fmt.Sprintf("%06d.tmp", fd.Num) + default: + panic("invalid file type") + } +} + +func fsHasOldName(fd FileDesc) bool { + return fd.Type == TypeTable +} + +func fsGenOldName(fd FileDesc) string { + switch fd.Type { + case TypeTable: + return fmt.Sprintf("%06d.sst", fd.Num) + } + return fsGenName(fd) +} + +func fsParseName(name string) (fd FileDesc, ok bool) { + var tail string + _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail) + if err == nil { + switch tail { + case "log": + fd.Type = TypeJournal + case "ldb", "sst": + fd.Type = TypeTable + case "tmp": + fd.Type = TypeTemp + default: + return + } + return fd, true + } + n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail) + if n == 1 { + fd.Type = TypeManifest + return fd, true + } + return +} + +func fsParseNamePtr(name string, fd *FileDesc) bool { + _fd, ok := fsParseName(name) + if fd != nil { + *fd = _fd + } + return ok +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go new file mode 100644 index 0000000000..5545aeef2a --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go @@ -0,0 +1,34 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build nacl + +package storage + +import ( + "os" + "syscall" +) + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + return nil, syscall.ENOTSUP +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + return syscall.ENOTSUP +} + +func rename(oldpath, newpath string) error { + return syscall.ENOTSUP +} + +func isErrInvalid(err error) bool { + return false +} + +func syncDir(name string) error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go new file mode 100644 index 0000000000..b829798012 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go @@ -0,0 +1,63 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package storage + +import ( + "os" +) + +type plan9FileLock struct { + f *os.File +} + +func (fl *plan9FileLock) release() error { + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var ( + flag int + perm os.FileMode + ) + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + perm = os.ModeExclusive + } + f, err := os.OpenFile(path, flag, perm) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644) + } + if err != nil { + return + } + fl = &plan9FileLock{f: f} + return +} + +func rename(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err := os.Remove(newpath); err != nil { + return err + } + } + + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go new file mode 100644 index 0000000000..79901ee4a7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go @@ -0,0 +1,81 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build solaris + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } + if err != nil { + return + } + err = setFileLock(f, readOnly, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + flock := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Start: 0, + Len: 0, + Whence: 1, + } + if lock { + if readOnly { + flock.Type = syscall.F_RDLCK + } else { + flock.Type = syscall.F_WRLCK + } + } + return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func syncDir(name string) error { + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go new file mode 100644 index 0000000000..d75f66a9ef --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package storage + +import ( + "os" + "syscall" +) + +type unixFileLock struct { + f *os.File +} + +func (fl *unixFileLock) release() error { + if err := setFileLock(fl.f, false, false); err != nil { + return err + } + return fl.f.Close() +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + var flag int + if readOnly { + flag = os.O_RDONLY + } else { + flag = os.O_RDWR + } + f, err := os.OpenFile(path, flag, 0) + if os.IsNotExist(err) { + f, err = os.OpenFile(path, flag|os.O_CREATE, 0644) + } + if err != nil { + return + } + err = setFileLock(f, readOnly, true) + if err != nil { + f.Close() + return + } + fl = &unixFileLock{f: f} + return +} + +func setFileLock(f *os.File, readOnly, lock bool) error { + how := syscall.LOCK_UN + if lock { + if readOnly { + how = syscall.LOCK_SH + } else { + how = syscall.LOCK_EX + } + } + return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) +} + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func isErrInvalid(err error) bool { + if err == os.ErrInvalid { + return true + } + // Go < 1.8 + if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL { + return true + } + // Go >= 1.8 returns *os.PathError instead + if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL { + return true + } + return false +} + +func syncDir(name string) error { + // As per fsync manpage, Linux seems to expect fsync on directory, however + // some system don't support this, so we will ignore syscall.EINVAL. + // + // From fsync(2): + // Calling fsync() does not necessarily ensure that the entry in the + // directory containing the file has also reached disk. For that an + // explicit fsync() on a file descriptor for the directory is also needed. + f, err := os.Open(name) + if err != nil { + return err + } + defer f.Close() + if err := f.Sync(); err != nil && !isErrInvalid(err) { + return err + } + return nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go new file mode 100644 index 0000000000..899335fd7e --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go @@ -0,0 +1,78 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package storage + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procMoveFileExW = modkernel32.NewProc("MoveFileExW") +) + +const ( + _MOVEFILE_REPLACE_EXISTING = 1 +) + +type windowsFileLock struct { + fd syscall.Handle +} + +func (fl *windowsFileLock) release() error { + return syscall.Close(fl.fd) +} + +func newFileLock(path string, readOnly bool) (fl fileLock, err error) { + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return + } + var access, shareMode uint32 + if readOnly { + access = syscall.GENERIC_READ + shareMode = syscall.FILE_SHARE_READ + } else { + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if err == syscall.ERROR_FILE_NOT_FOUND { + fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) + } + if err != nil { + return + } + fl = &windowsFileLock{fd: fd} + return +} + +func moveFileEx(from *uint16, to *uint16, flags uint32) error { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + return error(e1) + } + return syscall.EINVAL + } + return nil +} + +func rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) +} + +func syncDir(name string) error { return nil } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go new file mode 100644 index 0000000000..838f1bee1b --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go @@ -0,0 +1,222 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package storage + +import ( + "bytes" + "os" + "sync" +) + +const typeShift = 4 + +// Verify at compile-time that typeShift is large enough to cover all FileType +// values by confirming that 0 == 0. +var _ [0]struct{} = [TypeAll >> typeShift]struct{}{} + +type memStorageLock struct { + ms *memStorage +} + +func (lock *memStorageLock) Unlock() { + ms := lock.ms + ms.mu.Lock() + defer ms.mu.Unlock() + if ms.slock == lock { + ms.slock = nil + } + return +} + +// memStorage is a memory-backed storage. +type memStorage struct { + mu sync.Mutex + slock *memStorageLock + files map[uint64]*memFile + meta FileDesc +} + +// NewMemStorage returns a new memory-backed storage implementation. 
+func NewMemStorage() Storage {
+	return &memStorage{
+		files: make(map[uint64]*memFile),
+	}
+}
+
+func (ms *memStorage) Lock() (Locker, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.slock != nil {
+		return nil, ErrLocked
+	}
+	ms.slock = &memStorageLock{ms: ms}
+	return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) SetMeta(fd FileDesc) error {
+	if !FileDescOk(fd) {
+		return ErrInvalidFile
+	}
+
+	ms.mu.Lock()
+	ms.meta = fd
+	ms.mu.Unlock()
+	return nil
+}
+
+func (ms *memStorage) GetMeta() (FileDesc, error) {
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if ms.meta.Zero() {
+		return FileDesc{}, os.ErrNotExist
+	}
+	return ms.meta, nil
+}
+
+func (ms *memStorage) List(ft FileType) ([]FileDesc, error) {
+	ms.mu.Lock()
+	var fds []FileDesc
+	for x := range ms.files {
+		fd := unpackFile(x)
+		if fd.Type&ft != 0 {
+			fds = append(fds, fd)
+		}
+	}
+	ms.mu.Unlock()
+	return fds, nil
+}
+
+func (ms *memStorage) Open(fd FileDesc) (Reader, error) {
+	if !FileDescOk(fd) {
+		return nil, ErrInvalidFile
+	}
+
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if m, exist := ms.files[packFile(fd)]; exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.open = true
+		return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil
+	}
+	return nil, os.ErrNotExist
+}
+
+func (ms *memStorage) Create(fd FileDesc) (Writer, error) {
+	if !FileDescOk(fd) {
+		return nil, ErrInvalidFile
+	}
+
+	x := packFile(fd)
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	m, exist := ms.files[x]
+	if exist {
+		if m.open {
+			return nil, errFileOpen
+		}
+		m.Reset()
+	} else {
+		m = &memFile{}
+		ms.files[x] = m
+	}
+	m.open = true
+	return &memWriter{memFile: m, ms: ms}, nil
+}
+
+func (ms *memStorage) Remove(fd FileDesc) error {
+	if !FileDescOk(fd) {
+		return ErrInvalidFile
+	}
+
+	x := packFile(fd)
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	if _, exist := ms.files[x]; exist {
+		delete(ms.files, x)
+		return nil
+	}
+	return os.ErrNotExist
+}
+
+func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
+	if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+		return ErrInvalidFile
+	}
+	if oldfd == newfd {
+		return nil
+	}
+
+	oldx := packFile(oldfd)
+	newx := packFile(newfd)
+	ms.mu.Lock()
+	defer ms.mu.Unlock()
+	oldm, exist := ms.files[oldx]
+	if !exist {
+		return os.ErrNotExist
+	}
+	newm, exist := ms.files[newx]
+	if (exist && newm.open) || oldm.open {
+		return errFileOpen
+	}
+	delete(ms.files, oldx)
+	ms.files[newx] = oldm
+	return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memFile struct {
+	bytes.Buffer
+	open bool
+}
+
+type memReader struct {
+	*bytes.Reader
+	ms     *memStorage
+	m      *memFile
+	closed bool
+}
+
+func (mr *memReader) Close() error {
+	mr.ms.mu.Lock()
+	defer mr.ms.mu.Unlock()
+	if mr.closed {
+		return ErrClosed
+	}
+	mr.m.open = false
+	return nil
+}
+
+type memWriter struct {
+	*memFile
+	ms     *memStorage
+	closed bool
+}
+
+func (*memWriter) Sync() error { return nil }
+
+func (mw *memWriter) Close() error {
+	mw.ms.mu.Lock()
+	defer mw.ms.mu.Unlock()
+	if mw.closed {
+		return ErrClosed
+	}
+	mw.memFile.open = false
+	return nil
+}
+
+func packFile(fd FileDesc) uint64 {
+	return uint64(fd.Num)<<typeShift | uint64(fd.Type)
+}
+
+func unpackFile(x uint64) FileDesc {
+	return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 0000000000..4e4a724258
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// FileType represent a file type.
+type FileType int
+
+// File types.
+const (
+	TypeManifest FileType = 1 << iota
+	TypeJournal
+	TypeTable
+	TypeTemp
+
+	TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+	switch t {
+	case TypeManifest:
+		return "manifest"
+	case TypeJournal:
+		return "journal"
+	case TypeTable:
+		return "table"
+	case TypeTemp:
+		return "temp"
+	}
+	return fmt.Sprintf("<unknown:%d>", t)
+}
+
+// Common error.
+var (
+	ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+	ErrLocked      = errors.New("leveldb/storage: already locked")
+	ErrClosed      = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+	Fd  FileDesc
+	Err error
+}
+
+func isCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	}
+	return false
+}
+
+func (e *ErrCorrupted) Error() string {
+	if !e.Fd.Zero() {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+	}
+	return e.Err.Error()
+}
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+	// Sync commits the current contents of the file to stable storage.
+	Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+	io.ReadSeeker
+	io.ReaderAt
+	io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+	io.WriteCloser
+	Syncer
+}
+
+// Locker is the interface that wraps Unlock method.
+type Locker interface {
+	Unlock()
+}
+
+// FileDesc is a 'file descriptor'.
+type FileDesc struct {
+	Type FileType
+	Num  int64
+}
+
+func (fd FileDesc) String() string {
+	switch fd.Type {
+	case TypeManifest:
+		return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+	case TypeJournal:
+		return fmt.Sprintf("%06d.log", fd.Num)
+	case TypeTable:
+		return fmt.Sprintf("%06d.ldb", fd.Num)
+	case TypeTemp:
+		return fmt.Sprintf("%06d.tmp", fd.Num)
+	default:
+		return fmt.Sprintf("%#x-%d", fd.Type, fd.Num)
+	}
+}
+
+// Zero returns true if fd == (FileDesc{}).
+func (fd FileDesc) Zero() bool {
+	return fd == (FileDesc{})
+}
+
+// FileDescOk returns true if fd is a valid 'file descriptor'.
+func FileDescOk(fd FileDesc) bool {
+	switch fd.Type {
+	case TypeManifest:
+	case TypeJournal:
+	case TypeTable:
+	case TypeTemp:
+	default:
+		return false
+	}
+	return fd.Num >= 0
+}
+
+// Storage is the storage. A storage instance must be safe for concurrent use.
+type Storage interface {
+	// Lock locks the storage. Any subsequent attempt to call Lock will fail
+	// until the last lock released.
+	// Caller should call Unlock method after use.
+	Lock() (Locker, error)
+
+	// Log logs a string. This is used for logging.
+	// An implementation may write to a file, stdout or simply do nothing.
+	Log(str string)
+
+	// SetMeta store 'file descriptor' that can later be acquired using GetMeta
+	// method. The 'file descriptor' should point to a valid file.
+	// SetMeta should be implemented in such way that changes should happen
+	// atomically.
+ SetMeta(fd FileDesc) error + + // GetMeta returns 'file descriptor' stored in meta. The 'file descriptor' + // can be updated using SetMeta method. + // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or + // 'file descriptor' point to nonexistent file. + GetMeta() (FileDesc, error) + + // List returns file descriptors that match the given file types. + // The file types may be OR'ed together. + List(ft FileType) ([]FileDesc, error) + + // Open opens file with the given 'file descriptor' read-only. + // Returns os.ErrNotExist error if the file does not exist. + // Returns ErrClosed if the underlying storage is closed. + Open(fd FileDesc) (Reader, error) + + // Create creates file with the given 'file descriptor', truncate if already + // exist and opens write-only. + // Returns ErrClosed if the underlying storage is closed. + Create(fd FileDesc) (Writer, error) + + // Remove removes file with the given 'file descriptor'. + // Returns ErrClosed if the underlying storage is closed. + Remove(fd FileDesc) error + + // Rename renames file from oldfd to newfd. + // Returns ErrClosed if the underlying storage is closed. + Rename(oldfd, newfd FileDesc) error + + // Close closes the storage. + // It is valid to call Close multiple times. Other methods should not be + // called after the storage has been closed. + Close() error +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go new file mode 100644 index 0000000000..1fac60d050 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go @@ -0,0 +1,531 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + "sync/atomic" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/table" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// tFile holds basic information about a table. +type tFile struct { + fd storage.FileDesc + seekLeft int32 + size int64 + imin, imax internalKey +} + +// Returns true if given key is after largest key of this table. +func (t *tFile) after(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 +} + +// Returns true if given key is before smallest key of this table. +func (t *tFile) before(icmp *iComparer, ukey []byte) bool { + return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 +} + +// Returns true if given key range overlaps with this table key range. +func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { + return !t.after(icmp, umin) && !t.before(icmp, umax) +} + +// Cosumes one seek and return current seeks left. +func (t *tFile) consumeSeek() int32 { + return atomic.AddInt32(&t.seekLeft, -1) +} + +// Creates new tFile. +func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { + f := &tFile{ + fd: fd, + size: size, + imin: imin, + imax: imax, + } + + // We arrange to automatically compact this file after + // a certain number of seeks. 
Let's assume: + // (1) One seek costs 10ms + // (2) Writing or reading 1MB costs 10ms (100MB/s) + // (3) A compaction of 1MB does 25MB of IO: + // 1MB read from this level + // 10-12MB read from next level (boundaries may be misaligned) + // 10-12MB written to next level + // This implies that 25 seeks cost the same as the compaction + // of 1MB of data. I.e., one seek costs approximately the + // same as the compaction of 40KB of data. We are a little + // conservative and allow approximately one seek for every 16KB + // of data before triggering a compaction. + f.seekLeft = int32(size / 16384) + if f.seekLeft < 100 { + f.seekLeft = 100 + } + + return f +} + +func tableFileFromRecord(r atRecord) *tFile { + return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) +} + +// tFiles hold multiple tFile. +type tFiles []*tFile + +func (tf tFiles) Len() int { return len(tf) } +func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } + +func (tf tFiles) nums() string { + x := "[ " + for i, f := range tf { + if i != 0 { + x += ", " + } + x += fmt.Sprint(f.fd.Num) + } + x += " ]" + return x +} + +// Returns true if i smallest key is less than j. +// This used for sort by key in ascending order. +func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { + a, b := tf[i], tf[j] + n := icmp.Compare(a.imin, b.imin) + if n == 0 { + return a.fd.Num < b.fd.Num + } + return n < 0 +} + +// Returns true if i file number is greater than j. +// This used for sort by file number in descending order. +func (tf tFiles) lessByNum(i, j int) bool { + return tf[i].fd.Num > tf[j].fd.Num +} + +// Sorts tables by key in ascending order. +func (tf tFiles) sortByKey(icmp *iComparer) { + sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) +} + +// Sorts tables by file number in descending order. +func (tf tFiles) sortByNum() { + sort.Sort(&tFilesSortByNum{tFiles: tf}) +} + +// Returns sum of all tables size. +func (tf tFiles) size() (sum int64) { + for _, t := range tf { + sum += t.size + } + return sum +} + +// Searches smallest index of tables whose its smallest +// key is after or equal with given key. +func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imin, ikey) >= 0 + }) +} + +// Searches smallest index of tables whose its largest +// key is after or equal with given key. +func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { + return sort.Search(len(tf), func(i int) bool { + return icmp.Compare(tf[i].imax, ikey) >= 0 + }) +} + +// Returns true if given key range overlaps with one or more +// tables key range. If unsorted is true then binary search will not be used. +func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { + if unsorted { + // Check against all files. + for _, t := range tf { + if t.overlaps(icmp, umin, umax) { + return true + } + } + return false + } + + i := 0 + if len(umin) > 0 { + // Find the earliest possible internal key for min. + i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) + } + if i >= len(tf) { + // Beginning of range is after all files, so no overlap. + return false + } + return !tf[i].before(icmp, umax) +} + +// Returns tables whose its key range overlaps with given key range. +// Range will be expanded if ukey found hop across tables. +// If overlapped is true then the search will be restarted if umax +// expanded. +// The dst content will be overwritten. 
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { + dst = dst[:0] + for i := 0; i < len(tf); { + t := tf[i] + if t.overlaps(icmp, umin, umax) { + if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { + umin = t.imin.ukey() + dst = dst[:0] + i = 0 + continue + } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { + umax = t.imax.ukey() + // Restart search if it is overlapped. + if overlapped { + dst = dst[:0] + i = 0 + continue + } + } + + dst = append(dst, t) + } + i++ + } + + return dst +} + +// Returns tables key range. +func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { + for i, t := range tf { + if i == 0 { + imin, imax = t.imin, t.imax + continue + } + if icmp.Compare(t.imin, imin) < 0 { + imin = t.imin + } + if icmp.Compare(t.imax, imax) > 0 { + imax = t.imax + } + } + + return +} + +// Creates iterator index from tables. +func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { + if slice != nil { + var start, limit int + if slice.Start != nil { + start = tf.searchMax(icmp, internalKey(slice.Start)) + } + if slice.Limit != nil { + limit = tf.searchMin(icmp, internalKey(slice.Limit)) + } else { + limit = tf.Len() + } + tf = tf[start:limit] + } + return iterator.NewArrayIndexer(&tFilesArrayIndexer{ + tFiles: tf, + tops: tops, + icmp: icmp, + slice: slice, + ro: ro, + }) +} + +// Tables iterator index. +type tFilesArrayIndexer struct { + tFiles + tops *tOps + icmp *iComparer + slice *util.Range + ro *opt.ReadOptions +} + +func (a *tFilesArrayIndexer) Search(key []byte) int { + return a.searchMax(a.icmp, internalKey(key)) +} + +func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { + if i == 0 || i == a.Len()-1 { + return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) + } + return a.tops.newIterator(a.tFiles[i], nil, a.ro) +} + +// Helper type for sortByKey. +type tFilesSortByKey struct { + tFiles + icmp *iComparer +} + +func (x *tFilesSortByKey) Less(i, j int) bool { + return x.lessByKey(x.icmp, i, j) +} + +// Helper type for sortByNum. +type tFilesSortByNum struct { + tFiles +} + +func (x *tFilesSortByNum) Less(i, j int) bool { + return x.lessByNum(i, j) +} + +// Table operations. +type tOps struct { + s *session + noSync bool + evictRemoved bool + cache *cache.Cache + bcache *cache.Cache + bpool *util.BufferPool +} + +// Creates an empty table and returns table writer. +func (t *tOps) create() (*tWriter, error) { + fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} + fw, err := t.s.stor.Create(fd) + if err != nil { + return nil, err + } + return &tWriter{ + t: t, + fd: fd, + w: fw, + tw: table.NewWriter(fw, t.s.o.Options), + }, nil +} + +// Builds table from src iterator. +func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { + w, err := t.create() + if err != nil { + return + } + + defer func() { + if err != nil { + w.drop() + } + }() + + for src.Next() { + err = w.append(src.Key(), src.Value()) + if err != nil { + return + } + } + err = src.Error() + if err != nil { + return + } + + n = w.tw.EntriesLen() + f, err = w.finish() + return +} + +// Opens table. It returns a cache handle, which should +// be released after use. 
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { + ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { + var r storage.Reader + r, err = t.s.stor.Open(f.fd) + if err != nil { + return 0, nil + } + + var bcache *cache.NamespaceGetter + if t.bcache != nil { + bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} + } + + var tr *table.Reader + tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) + if err != nil { + r.Close() + return 0, nil + } + return 1, tr + + }) + if ch == nil && err == nil { + err = ErrClosed + } + return +} + +// Finds key/value pair whose key is greater than or equal to the +// given key. +func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).Find(key, true, ro) +} + +// Finds key that is greater than or equal to the given key. +func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { + ch, err := t.open(f) + if err != nil { + return nil, err + } + defer ch.Release() + return ch.Value().(*table.Reader).FindKey(key, true, ro) +} + +// Returns approximate offset of the given key. +func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { + ch, err := t.open(f) + if err != nil { + return + } + defer ch.Release() + return ch.Value().(*table.Reader).OffsetOf(key) +} + +// Creates an iterator from the given table. +func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + ch, err := t.open(f) + if err != nil { + return iterator.NewEmptyIterator(err) + } + iter := ch.Value().(*table.Reader).NewIterator(slice, ro) + iter.SetReleaser(ch) + return iter +} + +// Removes table from persistent storage. It waits until +// no one use the the table. +func (t *tOps) remove(f *tFile) { + t.cache.Delete(0, uint64(f.fd.Num), func() { + if err := t.s.stor.Remove(f.fd); err != nil { + t.s.logf("table@remove removing @%d %q", f.fd.Num, err) + } else { + t.s.logf("table@remove removed @%d", f.fd.Num) + } + if t.evictRemoved && t.bcache != nil { + t.bcache.EvictNS(uint64(f.fd.Num)) + } + }) +} + +// Closes the table ops instance. It will close all tables, +// regadless still used or not. +func (t *tOps) close() { + t.bpool.Close() + t.cache.Close() + if t.bcache != nil { + t.bcache.CloseWeak() + } +} + +// Creates new initialized table ops instance. +func newTableOps(s *session) *tOps { + var ( + cacher cache.Cacher + bcache *cache.Cache + bpool *util.BufferPool + ) + if s.o.GetOpenFilesCacheCapacity() > 0 { + cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) + } + if !s.o.GetDisableBlockCache() { + var bcacher cache.Cacher + if s.o.GetBlockCacheCapacity() > 0 { + bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) + } + bcache = cache.NewCache(bcacher) + } + if !s.o.GetDisableBufferPool() { + bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) + } + return &tOps{ + s: s, + noSync: s.o.GetNoSync(), + evictRemoved: s.o.GetBlockCacheEvictRemoved(), + cache: cache.NewCache(cacher), + bcache: bcache, + bpool: bpool, + } +} + +// tWriter wraps the table writer. It keep track of file descriptor +// and added key range. +type tWriter struct { + t *tOps + + fd storage.FileDesc + w storage.Writer + tw *table.Writer + + first, last []byte +} + +// Append key/value pair to the table. 
+func (w *tWriter) append(key, value []byte) error { + if w.first == nil { + w.first = append([]byte{}, key...) + } + w.last = append(w.last[:0], key...) + return w.tw.Append(key, value) +} + +// Returns true if the table is empty. +func (w *tWriter) empty() bool { + return w.first == nil +} + +// Closes the storage.Writer. +func (w *tWriter) close() { + if w.w != nil { + w.w.Close() + w.w = nil + } +} + +// Finalizes the table and returns table file. +func (w *tWriter) finish() (f *tFile, err error) { + defer w.close() + err = w.tw.Close() + if err != nil { + return + } + if !w.t.noSync { + err = w.w.Sync() + if err != nil { + return + } + } + f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) + return +} + +// Drops the table. +func (w *tWriter) drop() { + w.close() + w.t.s.stor.Remove(w.fd) + w.t.s.reuseFileNum(w.fd.Num) + w.tw = nil + w.first = nil + w.last = nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go new file mode 100644 index 0000000000..496feb6fb4 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go @@ -0,0 +1,1139 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "fmt" + "io" + "sort" + "strings" + "sync" + + "github.com/golang/snappy" + + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// Reader errors. +var ( + ErrNotFound = errors.ErrNotFound + ErrReaderReleased = errors.New("leveldb/table: reader released") + ErrIterReleased = errors.New("leveldb/table: iterator released") +) + +// ErrCorrupted describes error due to corruption. This error will be wrapped +// with errors.ErrCorrupted. +type ErrCorrupted struct { + Pos int64 + Size int64 + Kind string + Reason string +} + +func (e *ErrCorrupted) Error() string { + return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +type block struct { + bpool *util.BufferPool + bh blockHandle + data []byte + restartsLen int + restartsOffset int +} + +func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { + index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) + offset++ // shared always zero, since this is a restart point + v1, n1 := binary.Uvarint(b.data[offset:]) // key length + _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length + m := offset + n1 + n2 + return cmp.Compare(b.data[m:m+int(v1)], key) > 0 + }) + rstart - 1 + if index < rstart { + // The smallest key is greater-than key sought. 
+ index = rstart + } + offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) + return +} + +func (b *block) restartIndex(rstart, rlimit, offset int) int { + return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset + }) + rstart - 1 +} + +func (b *block) restartOffset(index int) int { + return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) +} + +func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { + if offset >= b.restartsOffset { + if offset != b.restartsOffset { + err = &ErrCorrupted{Reason: "entries offset not aligned"} + } + return + } + v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length + v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length + v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length + m := n0 + n1 + n2 + n = m + int(v1) + int(v2) + if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { + err = &ErrCorrupted{Reason: "entries corrupted"} + return + } + key = b.data[offset+m : offset+m+int(v1)] + value = b.data[offset+m+int(v1) : offset+n] + nShared = int(v0) + return +} + +func (b *block) Release() { + b.bpool.Put(b.data) + b.bpool = nil + b.data = nil +} + +type dir int + +const ( + dirReleased dir = iota - 1 + dirSOI + dirEOI + dirBackward + dirForward +) + +type blockIter struct { + tr *Reader + block *block + blockReleaser util.Releaser + releaser util.Releaser + key, value []byte + offset int + // Previous offset, only filled by Next. + prevOffset int + prevNode []int + prevKeys []byte + restartIndex int + // Iterator direction. + dir dir + // Restart index slice range. + riStart int + riLimit int + // Offset slice range. + offsetStart int + offsetRealStart int + offsetLimit int + // Error. 
+ err error +} + +func (i *blockIter) sErr(err error) { + i.err = err + i.key = nil + i.value = nil + i.prevNode = nil + i.prevKeys = nil +} + +func (i *blockIter) reset() { + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.restartIndex = i.riStart + i.offset = i.offsetStart + i.dir = dirSOI + i.key = i.key[:0] + i.value = nil +} + +func (i *blockIter) isFirst() bool { + switch i.dir { + case dirForward: + return i.prevOffset == i.offsetRealStart + case dirBackward: + return len(i.prevNode) == 1 && i.restartIndex == i.riStart + } + return false +} + +func (i *blockIter) isLast() bool { + switch i.dir { + case dirForward, dirBackward: + return i.offset == i.offsetLimit + } + return false +} + +func (i *blockIter) First() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirSOI + return i.Next() +} + +func (i *blockIter) Last() bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + i.dir = dirEOI + return i.Prev() +} + +func (i *blockIter) Seek(key []byte) bool { + if i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) + if err != nil { + i.sErr(err) + return false + } + i.restartIndex = ri + i.offset = max(i.offsetStart, offset) + if i.dir == dirSOI || i.dir == dirEOI { + i.dir = dirForward + } + for i.Next() { + if i.tr.cmp.Compare(i.key, key) >= 0 { + return true + } + } + return false +} + +func (i *blockIter) Next() bool { + if i.dir == dirEOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + if i.dir == dirSOI { + i.restartIndex = i.riStart + i.offset = i.offsetStart + } else if i.dir == dirBackward { + i.prevNode = i.prevNode[:0] + i.prevKeys = i.prevKeys[:0] + } + for i.offset < i.offsetRealStart { + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.offset += n + } + if i.offset >= i.offsetLimit { + i.dir = dirEOI + if i.offset != i.offsetLimit { + i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) + } + return false + } + key, value, nShared, n, err := i.block.entry(i.offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if n == 0 { + i.dir = dirEOI + return false + } + i.key = append(i.key[:nShared], key...) + i.value = value + i.prevOffset = i.offset + i.offset += n + i.dir = dirForward + return true +} + +func (i *blockIter) Prev() bool { + if i.dir == dirSOI || i.err != nil { + return false + } else if i.dir == dirReleased { + i.err = ErrIterReleased + return false + } + + var ri int + if i.dir == dirForward { + // Change direction. + i.offset = i.prevOffset + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) + i.dir = dirBackward + } else if i.dir == dirEOI { + // At the end of iterator. 
+ i.restartIndex = i.riLimit + i.offset = i.offsetLimit + if i.offset == i.offsetRealStart { + i.dir = dirSOI + return false + } + ri = i.riLimit - 1 + i.dir = dirBackward + } else if len(i.prevNode) == 1 { + // This is the end of a restart range. + i.offset = i.prevNode[0] + i.prevNode = i.prevNode[:0] + if i.restartIndex == i.riStart { + i.dir = dirSOI + return false + } + i.restartIndex-- + ri = i.restartIndex + } else { + // In the middle of restart range, get from cache. + n := len(i.prevNode) - 3 + node := i.prevNode[n:] + i.prevNode = i.prevNode[:n] + // Get the key. + ko := node[0] + i.key = append(i.key[:0], i.prevKeys[ko:]...) + i.prevKeys = i.prevKeys[:ko] + // Get the value. + vo := node[1] + vl := vo + node[2] + i.value = i.block.data[vo:vl] + i.offset = vl + return true + } + // Build entries cache. + i.key = i.key[:0] + i.value = nil + offset := i.block.restartOffset(ri) + if offset == i.offset { + ri-- + if ri < 0 { + i.dir = dirSOI + return false + } + offset = i.block.restartOffset(ri) + } + i.prevNode = append(i.prevNode, offset) + for { + key, value, nShared, n, err := i.block.entry(offset) + if err != nil { + i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) + return false + } + if offset >= i.offsetRealStart { + if i.value != nil { + // Appends 3 variables: + // 1. Previous keys offset + // 2. Value offset in the data block + // 3. Value length + i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) + i.prevKeys = append(i.prevKeys, i.key...) + } + i.value = value + } + i.key = append(i.key[:nShared], key...) + offset += n + // Stop if target offset reached. + if offset >= i.offset { + if offset != i.offset { + i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) + return false + } + + break + } + } + i.restartIndex = ri + i.offset = offset + return true +} + +func (i *blockIter) Key() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.key +} + +func (i *blockIter) Value() []byte { + if i.err != nil || i.dir <= dirEOI { + return nil + } + return i.value +} + +func (i *blockIter) Release() { + if i.dir != dirReleased { + i.tr = nil + i.block = nil + i.prevNode = nil + i.prevKeys = nil + i.key = nil + i.value = nil + i.dir = dirReleased + if i.blockReleaser != nil { + i.blockReleaser.Release() + i.blockReleaser = nil + } + if i.releaser != nil { + i.releaser.Release() + i.releaser = nil + } + } +} + +func (i *blockIter) SetReleaser(releaser util.Releaser) { + if i.dir == dirReleased { + panic(util.ErrReleased) + } + if i.releaser != nil && releaser != nil { + panic(util.ErrHasReleaser) + } + i.releaser = releaser +} + +func (i *blockIter) Valid() bool { + return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) +} + +func (i *blockIter) Error() error { + return i.err +} + +type filterBlock struct { + bpool *util.BufferPool + data []byte + oOffset int + baseLg uint + filtersNum int +} + +func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { + i := int(offset >> b.baseLg) + if i < b.filtersNum { + o := b.data[b.oOffset+i*4:] + n := int(binary.LittleEndian.Uint32(o)) + m := int(binary.LittleEndian.Uint32(o[4:])) + if n < m && m <= b.oOffset { + return filter.Contains(b.data[n:m], key) + } else if n == m { + return false + } + } + return true +} + +func (b *filterBlock) Release() { + b.bpool.Put(b.data) + b.bpool = nil + b.data = nil +} + +type indexIter struct { + *blockIter + tr *Reader + slice *util.Range + // Options + fillCache bool +} + +func (i 
*indexIter) Get() iterator.Iterator { + value := i.Value() + if value == nil { + return nil + } + dataBH, n := decodeBlockHandle(value) + if n == 0 { + return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) + } + + var slice *util.Range + if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { + slice = i.slice + } + return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) +} + +// Reader is a table reader. +type Reader struct { + mu sync.RWMutex + fd storage.FileDesc + reader io.ReaderAt + cache *cache.NamespaceGetter + err error + bpool *util.BufferPool + // Options + o *opt.Options + cmp comparer.Comparer + filter filter.Filter + verifyChecksum bool + + dataEnd int64 + metaBH, indexBH, filterBH blockHandle + indexBlock *block + filterBlock *filterBlock +} + +func (r *Reader) blockKind(bh blockHandle) string { + switch bh.offset { + case r.metaBH.offset: + return "meta-block" + case r.indexBH.offset: + return "index-block" + case r.filterBH.offset: + if r.filterBH.length > 0 { + return "filter-block" + } + } + return "data-block" +} + +func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { + return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} +} + +func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { + return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) +} + +func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { + if cerr, ok := err.(*ErrCorrupted); ok { + cerr.Pos = int64(bh.offset) + cerr.Size = int64(bh.length) + cerr.Kind = r.blockKind(bh) + return &errors.ErrCorrupted{Fd: r.fd, Err: cerr} + } + return err +} + +func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { + data := r.bpool.Get(int(bh.length + blockTrailerLen)) + if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { + return nil, err + } + + if verifyChecksum { + n := bh.length + 1 + checksum0 := binary.LittleEndian.Uint32(data[n:]) + checksum1 := util.NewCRC(data[:n]).Value() + if checksum0 != checksum1 { + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) + } + } + + switch data[bh.length] { + case blockTypeNoCompression: + data = data[:bh.length] + case blockTypeSnappyCompression: + decLen, err := snappy.DecodedLen(data[:bh.length]) + if err != nil { + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, err.Error()) + } + decData := r.bpool.Get(decLen) + decData, err = snappy.Decode(decData, data[:bh.length]) + r.bpool.Put(data) + if err != nil { + r.bpool.Put(decData) + return nil, r.newErrCorruptedBH(bh, err.Error()) + } + data = decData + default: + r.bpool.Put(data) + return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) + } + return data, nil +} + +func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { + data, err := r.readRawBlock(bh, verifyChecksum) + if err != nil { + return nil, err + } + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + b := &block{ + bpool: r.bpool, + bh: bh, + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + } + return b, nil +} + +func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { + if r.cache != nil { + var ( + err error + ch 
*cache.Handle + ) + if fillCache { + ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { + var b *block + b, err = r.readBlock(bh, verifyChecksum) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + } else { + ch = r.cache.Get(bh.offset, nil) + } + if ch != nil { + b, ok := ch.Value().(*block) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readBlock(bh, verifyChecksum) + return b, b, err +} + +func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { + data, err := r.readRawBlock(bh, true) + if err != nil { + return nil, err + } + n := len(data) + if n < 5 { + return nil, r.newErrCorruptedBH(bh, "too short") + } + m := n - 5 + oOffset := int(binary.LittleEndian.Uint32(data[m:])) + if oOffset > m { + return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") + } + b := &filterBlock{ + bpool: r.bpool, + data: data, + oOffset: oOffset, + baseLg: uint(data[n-1]), + filtersNum: (m - oOffset) / 4, + } + return b, nil +} + +func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { + if r.cache != nil { + var ( + err error + ch *cache.Handle + ) + if fillCache { + ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { + var b *filterBlock + b, err = r.readFilterBlock(bh) + if err != nil { + return 0, nil + } + return cap(b.data), b + }) + } else { + ch = r.cache.Get(bh.offset, nil) + } + if ch != nil { + b, ok := ch.Value().(*filterBlock) + if !ok { + ch.Release() + return nil, nil, errors.New("leveldb/table: inconsistent block type") + } + return b, ch, err + } else if err != nil { + return nil, nil, err + } + } + + b, err := r.readFilterBlock(bh) + return b, b, err +} + +func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { + if r.indexBlock == nil { + return r.readBlockCached(r.indexBH, true, fillCache) + } + return r.indexBlock, util.NoopReleaser{}, nil +} + +func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { + if r.filterBlock == nil { + return r.readFilterBlockCached(r.filterBH, fillCache) + } + return r.filterBlock, util.NoopReleaser{}, nil +} + +func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { + bi := &blockIter{ + tr: r, + block: b, + blockReleaser: bReleaser, + // Valid key should never be nil. 
+ key: make([]byte, 0), + dir: dirSOI, + riStart: 0, + riLimit: b.restartsLen, + offsetStart: 0, + offsetRealStart: 0, + offsetLimit: b.restartsOffset, + } + if slice != nil { + if slice.Start != nil { + if bi.Seek(slice.Start) { + bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) + bi.offsetStart = b.restartOffset(bi.riStart) + bi.offsetRealStart = bi.prevOffset + } else { + bi.riStart = b.restartsLen + bi.offsetStart = b.restartsOffset + bi.offsetRealStart = b.restartsOffset + } + } + if slice.Limit != nil { + if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { + bi.offsetLimit = bi.prevOffset + bi.riLimit = bi.restartIndex + 1 + } + } + bi.reset() + if bi.offsetStart > bi.offsetLimit { + bi.sErr(errors.New("leveldb/table: invalid slice range")) + } + } + return bi +} + +func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { + b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + return r.newBlockIter(b, rel, slice, false) +} + +func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) +} + +// NewIterator creates an iterator from the table. +// +// Slice allows slicing the iterator to only contains keys in the given +// range. A nil Range.Start is treated as a key before all keys in the +// table. And a nil Range.Limit is treated as a key after all keys in +// the table. +// +// WARNING: Any slice returned by interator (e.g. slice returned by calling +// Iterator.Key() or Iterator.Key() methods), its content should not be modified +// unless noted otherwise. +// +// The returned iterator is not safe for concurrent use and should be released +// after use. +// +// Also read Iterator documentation of the leveldb/iterator package. +func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + return iterator.NewEmptyIterator(r.err) + } + + fillCache := !ro.GetDontFillCache() + indexBlock, rel, err := r.getIndexBlock(fillCache) + if err != nil { + return iterator.NewEmptyIterator(err) + } + index := &indexIter{ + blockIter: r.newBlockIter(indexBlock, rel, slice, true), + tr: r, + slice: slice, + fillCache: !ro.GetDontFillCache(), + } + return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) +} + +func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.getIndexBlock(true) + if err != nil { + return + } + defer rel.Release() + + index := r.newBlockIter(indexBlock, nil, nil, true) + defer index.Release() + + if !index.Seek(key) { + if err = index.Error(); err == nil { + err = ErrNotFound + } + return + } + + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return nil, nil, r.err + } + + // The filter should only used for exact match. 
+ if filtered && r.filter != nil { + filterBlock, frel, ferr := r.getFilterBlock(true) + if ferr == nil { + if !filterBlock.contains(r.filter, dataBH.offset, key) { + frel.Release() + return nil, nil, ErrNotFound + } + frel.Release() + } else if !errors.IsCorrupted(ferr) { + return nil, nil, ferr + } + } + + data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) + if !data.Seek(key) { + data.Release() + if err = data.Error(); err != nil { + return + } + + // The nearest greater-than key is the first key of the next block. + if !index.Next() { + if err = index.Error(); err == nil { + err = ErrNotFound + } + return + } + + dataBH, n = decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return nil, nil, r.err + } + + data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) + if !data.Next() { + data.Release() + if err = data.Error(); err == nil { + err = ErrNotFound + } + return + } + } + + // Key doesn't use block buffer, no need to copy the buffer. + rkey = data.Key() + if !noValue { + if r.bpool == nil { + value = data.Value() + } else { + // Value does use block buffer, and since the buffer will be + // recycled, it need to be copied. + value = append([]byte{}, data.Value()...) + } + } + data.Release() + return +} + +// Find finds key/value pair whose key is greater than or equal to the +// given key. It returns ErrNotFound if the table doesn't contain +// such pair. +// If filtered is true then the nearest 'block' will be checked against +// 'filter data' (if present) and will immediately return ErrNotFound if +// 'filter data' indicates that such pair doesn't exist. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { + return r.find(key, filtered, ro, false) +} + +// FindKey finds key that is greater than or equal to the given key. +// It returns ErrNotFound if the table doesn't contain such key. +// If filtered is true then the nearest 'block' will be checked against +// 'filter data' (if present) and will immediately return ErrNotFound if +// 'filter data' indicates that such key doesn't exist. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { + rkey, _, err = r.find(key, filtered, ro, true) + return +} + +// Get gets the value for the given key. It returns errors.ErrNotFound +// if the table does not contain the key. +// +// The caller may modify the contents of the returned slice as it is its +// own copy. +// It is safe to modify the contents of the argument after Find returns. +func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + rkey, value, err := r.find(key, false, ro, false) + if err == nil && r.cmp.Compare(rkey, key) != 0 { + value = nil + err = ErrNotFound + } + return +} + +// OffsetOf returns approximate offset for the given key. +// +// It is safe to modify the contents of the argument after Get returns. 
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.err != nil { + err = r.err + return + } + + indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) + if err != nil { + return + } + defer rel.Release() + + index := r.newBlockIter(indexBlock, nil, nil, true) + defer index.Release() + if index.Seek(key) { + dataBH, n := decodeBlockHandle(index.Value()) + if n == 0 { + r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") + return + } + offset = int64(dataBH.offset) + return + } + err = index.Error() + if err == nil { + offset = r.dataEnd + } + return +} + +// Release implements util.Releaser. +// It also close the file if it is an io.Closer. +func (r *Reader) Release() { + r.mu.Lock() + defer r.mu.Unlock() + + if closer, ok := r.reader.(io.Closer); ok { + closer.Close() + } + if r.indexBlock != nil { + r.indexBlock.Release() + r.indexBlock = nil + } + if r.filterBlock != nil { + r.filterBlock.Release() + r.filterBlock = nil + } + r.reader = nil + r.cache = nil + r.bpool = nil + r.err = ErrReaderReleased +} + +// NewReader creates a new initialized table reader for the file. +// The fi, cache and bpool is optional and can be nil. +// +// The returned table reader instance is safe for concurrent use. +func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { + if f == nil { + return nil, errors.New("leveldb/table: nil file") + } + + r := &Reader{ + fd: fd, + reader: f, + cache: cache, + bpool: bpool, + o: o, + cmp: o.GetComparer(), + verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), + } + + if size < footerLen { + r.err = r.newErrCorrupted(0, size, "table", "too small") + return r, nil + } + + footerPos := size - footerLen + var footer [footerLen]byte + if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { + return nil, err + } + if string(footer[footerLen-len(magic):footerLen]) != magic { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") + return r, nil + } + + var n int + // Decode the metaindex block handle. + r.metaBH, n = decodeBlockHandle(footer[:]) + if n == 0 { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") + return r, nil + } + + // Decode the index block handle. + r.indexBH, n = decodeBlockHandle(footer[n:]) + if n == 0 { + r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") + return r, nil + } + + // Read metaindex block. + metaBlock, err := r.readBlock(r.metaBH, true) + if err != nil { + if errors.IsCorrupted(err) { + r.err = err + return r, nil + } + return nil, err + } + + // Set data end. + r.dataEnd = int64(r.metaBH.offset) + + // Read metaindex. + metaIter := r.newBlockIter(metaBlock, nil, nil, true) + for metaIter.Next() { + key := string(metaIter.Key()) + if !strings.HasPrefix(key, "filter.") { + continue + } + fn := key[7:] + if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { + r.filter = f0 + } else { + for _, f0 := range o.GetAltFilters() { + if f0.Name() == fn { + r.filter = f0 + break + } + } + } + if r.filter != nil { + filterBH, n := decodeBlockHandle(metaIter.Value()) + if n == 0 { + continue + } + r.filterBH = filterBH + // Update data end. + r.dataEnd = int64(filterBH.offset) + break + } + } + metaIter.Release() + metaBlock.Release() + + // Cache index and filter block locally, since we don't have global cache. 
+ if cache == nil { + r.indexBlock, err = r.readBlock(r.indexBH, true) + if err != nil { + if errors.IsCorrupted(err) { + r.err = err + return r, nil + } + return nil, err + } + if r.filter != nil { + r.filterBlock, err = r.readFilterBlock(r.filterBH) + if err != nil { + if !errors.IsCorrupted(err) { + return nil, err + } + + // Don't use filter then. + r.filter = nil + } + } + } + + return r, nil +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go new file mode 100644 index 0000000000..beacdc1f02 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go @@ -0,0 +1,177 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package table allows read and write sorted key/value. +package table + +import ( + "encoding/binary" +) + +/* +Table: + +Table is consist of one or more data blocks, an optional filter block +a metaindex block, an index block and a table footer. Metaindex block +is a special block used to keep parameters of the table, such as filter +block name and its block handle. Index block is a special block used to +keep record of data blocks offset and length, index block use one as +restart interval. The key used by index block are the last key of preceding +block, shorter separator of adjacent blocks or shorter successor of the +last key of the last block. Filter block is an optional block contains +sequence of filter data generated by a filter generator. + +Table data structure: + + optional + / + +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ + | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | + +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ + + Each block followed by a 5-bytes trailer contains compression type and checksum. + +Table block trailer: + + +---------------------------+-------------------+ + | compression type (1-byte) | checksum (4-byte) | + +---------------------------+-------------------+ + + The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression + type also included in the checksum. + +Table footer: + + +------------------- 40-bytes -------------------+ + / \ + +------------------------+--------------------+------+-----------------+ + | metaindex block handle / index block handle / ---- | magic (8-bytes) | + +------------------------+--------------------+------+-----------------+ + + The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Block: + +Block is consist of one or more key/value entries and a block trailer. +Block entry shares key prefix with its preceding key until a restart +point reached. A block should contains at least one restart point. +First restart point are always zero. + +Block data structure: + + + restart point + restart point (depends on restart interval) + / / + +---------------+---------------+---------------+---------------+---------+ + | block entry 1 | block entry 2 | ... 
| block entry n | trailer | + +---------------+---------------+---------------+---------------+---------+ + +Key/value entry: + + +---- key len ----+ + / \ + +-------+---------+-----------+---------+--------------------+--------------+----------------+ + | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | + +-----------------+---------------------+--------------------+--------------+----------------+ + + Block entry shares key prefix with its preceding key: + Conditions: + restart_interval=2 + entry one : key=deck,value=v1 + entry two : key=dock,value=v2 + entry three: key=duck,value=v3 + The entries will be encoded as follow: + + + restart point (offset=0) + restart point (offset=16) + / / + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | + +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ + \ / \ / \ / + +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ + + The block trailer will contains two restart points: + + +------------+-----------+--------+ + | 0 | 16 | 2 | + +------------+-----------+---+----+ + \ / \ + +-- restart points --+ + restart points length + +Block trailer: + + +-- 4-bytes --+ + / \ + +-----------------+-----------------+-----------------+------------------------------+ + | restart point 1 | .... | restart point n | restart points len (4-bytes) | + +-----------------+-----------------+-----------------+------------------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +/* +Filter block: + +Filter block consist of one or more filter data and a filter block trailer. +The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. + +Filter block data structure: + + + offset 1 + offset 2 + offset n + trailer offset + / / / / + +---------------+---------------+---------------+---------+ + | filter data 1 | ... | filter data n | trailer | + +---------------+---------------+---------------+---------+ + +Filter block trailer: + + +- 4-bytes -+ + / \ + +---------------+---------------+---------------+-------------------------------+------------------+ + | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | + +-------------- +---------------+---------------+-------------------------------+------------------+ + + +NOTE: All fixed-length integer are little-endian. +*/ + +const ( + blockTrailerLen = 5 + footerLen = 48 + + magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" + + // The block type gives the per-block compression format. + // These constants are part of the file format and should not be changed. 
+ blockTypeNoCompression = 0 + blockTypeSnappyCompression = 1 + + // Generate new filter every 2KB of data + filterBaseLg = 11 + filterBase = 1 << filterBaseLg +) + +type blockHandle struct { + offset, length uint64 +} + +func decodeBlockHandle(src []byte) (blockHandle, int) { + offset, n := binary.Uvarint(src) + length, m := binary.Uvarint(src[n:]) + if n == 0 || m == 0 { + return blockHandle{}, 0 + } + return blockHandle{offset, length}, n + m +} + +func encodeBlockHandle(dst []byte, b blockHandle) int { + n := binary.PutUvarint(dst, b.offset) + m := binary.PutUvarint(dst[n:], b.length) + return n + m +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go new file mode 100644 index 0000000000..b96b271d8d --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go @@ -0,0 +1,375 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/golang/snappy" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func sharedPrefixLen(a, b []byte) int { + i, n := 0, len(a) + if n > len(b) { + n = len(b) + } + for i < n && a[i] == b[i] { + i++ + } + return i +} + +type blockWriter struct { + restartInterval int + buf util.Buffer + nEntries int + prevKey []byte + restarts []uint32 + scratch []byte +} + +func (w *blockWriter) append(key, value []byte) { + nShared := 0 + if w.nEntries%w.restartInterval == 0 { + w.restarts = append(w.restarts, uint32(w.buf.Len())) + } else { + nShared = sharedPrefixLen(w.prevKey, key) + } + n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) + n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) + w.buf.Write(w.scratch[:n]) + w.buf.Write(key[nShared:]) + w.buf.Write(value) + w.prevKey = append(w.prevKey[:0], key...) + w.nEntries++ +} + +func (w *blockWriter) finish() { + // Write restarts entry. + if w.nEntries == 0 { + // Must have at least one restart entry. + w.restarts = append(w.restarts, 0) + } + w.restarts = append(w.restarts, uint32(len(w.restarts))) + for _, x := range w.restarts { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } +} + +func (w *blockWriter) reset() { + w.buf.Reset() + w.nEntries = 0 + w.restarts = w.restarts[:0] +} + +func (w *blockWriter) bytesLen() int { + restartsLen := len(w.restarts) + if restartsLen == 0 { + restartsLen = 1 + } + return w.buf.Len() + 4*restartsLen + 4 +} + +type filterWriter struct { + generator filter.FilterGenerator + buf util.Buffer + nKeys int + offsets []uint32 +} + +func (w *filterWriter) add(key []byte) { + if w.generator == nil { + return + } + w.generator.Add(key) + w.nKeys++ +} + +func (w *filterWriter) flush(offset uint64) { + if w.generator == nil { + return + } + for x := int(offset / filterBase); x > len(w.offsets); { + w.generate() + } +} + +func (w *filterWriter) finish() { + if w.generator == nil { + return + } + // Generate last keys. 
+ + if w.nKeys > 0 { + w.generate() + } + w.offsets = append(w.offsets, uint32(w.buf.Len())) + for _, x := range w.offsets { + buf4 := w.buf.Alloc(4) + binary.LittleEndian.PutUint32(buf4, x) + } + w.buf.WriteByte(filterBaseLg) +} + +func (w *filterWriter) generate() { + // Record offset. + w.offsets = append(w.offsets, uint32(w.buf.Len())) + // Generate filters. + if w.nKeys > 0 { + w.generator.Generate(&w.buf) + w.nKeys = 0 + } +} + +// Writer is a table writer. +type Writer struct { + writer io.Writer + err error + // Options + cmp comparer.Comparer + filter filter.Filter + compression opt.Compression + blockSize int + + dataBlock blockWriter + indexBlock blockWriter + filterBlock filterWriter + pendingBH blockHandle + offset uint64 + nEntries int + // Scratch allocated enough for 5 uvarint. Block writer should not use + // first 20-bytes since it will be used to encode block handle, which + // then passed to the block writer itself. + scratch [50]byte + comparerScratch []byte + compressionScratch []byte +} + +func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { + // Compress the buffer if necessary. + var b []byte + if compression == opt.SnappyCompression { + // Allocate scratch enough for compression and block trailer. + if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { + w.compressionScratch = make([]byte, n) + } + compressed := snappy.Encode(w.compressionScratch, buf.Bytes()) + n := len(compressed) + b = compressed[:n+blockTrailerLen] + b[n] = blockTypeSnappyCompression + } else { + tmp := buf.Alloc(blockTrailerLen) + tmp[0] = blockTypeNoCompression + b = buf.Bytes() + } + + // Calculate the checksum. + n := len(b) - 4 + checksum := util.NewCRC(b[:n]).Value() + binary.LittleEndian.PutUint32(b[n:], checksum) + + // Write the buffer to the file. + _, err = w.writer.Write(b) + if err != nil { + return + } + bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} + w.offset += uint64(len(b)) + return +} + +func (w *Writer) flushPendingBH(key []byte) { + if w.pendingBH.length == 0 { + return + } + var separator []byte + if len(key) == 0 { + separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) + } else { + separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) + } + if separator == nil { + separator = w.dataBlock.prevKey + } else { + w.comparerScratch = separator + } + n := encodeBlockHandle(w.scratch[:20], w.pendingBH) + // Append the block handle to the index block. + w.indexBlock.append(separator, w.scratch[:n]) + // Reset prev key of the data block. + w.dataBlock.prevKey = w.dataBlock.prevKey[:0] + // Clear pending block handle. + w.pendingBH = blockHandle{} +} + +func (w *Writer) finishBlock() error { + w.dataBlock.finish() + bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + return err + } + w.pendingBH = bh + // Reset the data block. + w.dataBlock.reset() + // Flush the filter block. + w.filterBlock.flush(w.offset) + return nil +} + +// Append appends key/value pair to the table. The keys passed must +// be in increasing order. +// +// It is safe to modify the contents of the arguments after Append returns. 
+func (w *Writer) Append(key, value []byte) error { + if w.err != nil { + return w.err + } + if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { + w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) + return w.err + } + + w.flushPendingBH(key) + // Append key/value pair to the data block. + w.dataBlock.append(key, value) + // Add key to the filter block. + w.filterBlock.add(key) + + // Finish the data block if block size target reached. + if w.dataBlock.bytesLen() >= w.blockSize { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.nEntries++ + return nil +} + +// BlocksLen returns number of blocks written so far. +func (w *Writer) BlocksLen() int { + n := w.indexBlock.nEntries + if w.pendingBH.length > 0 { + // Includes the pending block. + n++ + } + return n +} + +// EntriesLen returns number of entries added so far. +func (w *Writer) EntriesLen() int { + return w.nEntries +} + +// BytesLen returns number of bytes written so far. +func (w *Writer) BytesLen() int { + return int(w.offset) +} + +// Close will finalize the table. Calling Append is not possible +// after Close, but calling BlocksLen, EntriesLen and BytesLen +// is still possible. +func (w *Writer) Close() error { + if w.err != nil { + return w.err + } + + // Write the last data block. Or empty data block if there + // aren't any data blocks at all. + if w.dataBlock.nEntries > 0 || w.nEntries == 0 { + if err := w.finishBlock(); err != nil { + w.err = err + return w.err + } + } + w.flushPendingBH(nil) + + // Write the filter block. + var filterBH blockHandle + w.filterBlock.finish() + if buf := &w.filterBlock.buf; buf.Len() > 0 { + filterBH, w.err = w.writeBlock(buf, opt.NoCompression) + if w.err != nil { + return w.err + } + } + + // Write the metaindex block. + if filterBH.length > 0 { + key := []byte("filter." + w.filter.Name()) + n := encodeBlockHandle(w.scratch[:20], filterBH) + w.dataBlock.append(key, w.scratch[:n]) + } + w.dataBlock.finish() + metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the index block. + w.indexBlock.finish() + indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) + if err != nil { + w.err = err + return w.err + } + + // Write the table footer. + footer := w.scratch[:footerLen] + for i := range footer { + footer[i] = 0 + } + n := encodeBlockHandle(footer, metaindexBH) + encodeBlockHandle(footer[n:], indexBH) + copy(footer[footerLen-len(magic):], magic) + if _, err := w.writer.Write(footer); err != nil { + w.err = err + return w.err + } + w.offset += footerLen + + w.err = errors.New("leveldb/table: writer is closed") + return nil +} + +// NewWriter creates a new initialized table writer for the file. +// +// Table writer is not safe for concurrent use. +func NewWriter(f io.Writer, o *opt.Options) *Writer { + w := &Writer{ + writer: f, + cmp: o.GetComparer(), + filter: o.GetFilter(), + compression: o.GetCompression(), + blockSize: o.GetBlockSize(), + comparerScratch: make([]byte, 0), + } + // data block + w.dataBlock.restartInterval = o.GetBlockRestartInterval() + // The first 20-bytes are used for encoding block handle. 
+ w.dataBlock.scratch = w.scratch[20:] + // index block + w.indexBlock.restartInterval = 1 + w.indexBlock.scratch = w.scratch[20:] + // filter block + if w.filter != nil { + w.filterBlock.generator = w.filter.NewGenerator() + w.filterBlock.flush(0) + } + return w +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go new file mode 100644 index 0000000000..0e2b519e5c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go @@ -0,0 +1,98 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sort" + + "github.com/syndtr/goleveldb/leveldb/storage" +) + +func shorten(str string) string { + if len(str) <= 8 { + return str + } + return str[:3] + ".." + str[len(str)-3:] +} + +var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"} + +func shortenb(bytes int) string { + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%d%sB", bytes, bunits[i]) +} + +func sshortenb(bytes int) string { + if bytes == 0 { + return "~" + } + sign := "+" + if bytes < 0 { + sign = "-" + bytes *= -1 + } + i := 0 + for ; bytes > 1024 && i < 4; i++ { + bytes /= 1024 + } + return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) +} + +func sint(x int) string { + if x == 0 { + return "~" + } + sign := "+" + if x < 0 { + sign = "-" + x *= -1 + } + return fmt.Sprintf("%s%d", sign, x) +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +type fdSorter []storage.FileDesc + +func (p fdSorter) Len() int { + return len(p) +} + +func (p fdSorter) Less(i, j int) bool { + return p[i].Num < p[j].Num +} + +func (p fdSorter) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func sortFds(fds []storage.FileDesc) { + sort.Sort(fdSorter(fds)) +} + +func ensureBuffer(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, n) + } + return b[:n] +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go new file mode 100644 index 0000000000..21de242552 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go @@ -0,0 +1,293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package util + +// This a copy of Go std bytes.Buffer with some modification +// and some features stripped. + +import ( + "bytes" + "io" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. +} + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". 
+func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). +func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + switch { + case n < 0 || n > b.Len(): + panic("leveldb/util.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. + b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Alloc allocs n bytes of slice from the buffer, growing the buffer as +// needed. If n is negative, Alloc will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Alloc(n int) []byte { + if n < 0 { + panic("leveldb/util.Buffer.Alloc: negative count") + } + m := b.grow(n) + return b.buf[m:] +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with bytes.ErrTooLarge. +func (b *Buffer) Grow(n int) { + if n < 0 { + panic("leveldb/util.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with bytes.ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + // If buffer is empty, reset to recover space. 
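// The loop below keeps at least MinRead bytes free ahead of every r.Read
// call: it first tries to reclaim the already-read prefix by sliding the
// unread bytes to the front, and only grows the allocation (to twice the
// current capacity plus MinRead) when sliding alone cannot free MinRead
// bytes.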
+ if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with bytes.ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(bytes.ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("leveldb/util.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// bytes.ErrTooLarge. +func (b *Buffer) WriteByte(c byte) error { + m := b.grow(1) + b.buf[m] = c + return nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + return c, nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. 
+// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + return line, err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go new file mode 100644 index 0000000000..2f3db974a7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go @@ -0,0 +1,239 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "fmt" + "sync" + "sync/atomic" + "time" +) + +type buffer struct { + b []byte + miss int +} + +// BufferPool is a 'buffer pool'. +type BufferPool struct { + pool [6]chan []byte + size [5]uint32 + sizeMiss [5]uint32 + sizeHalf [5]uint32 + baseline [4]int + baseline0 int + + mu sync.RWMutex + closed bool + closeC chan struct{} + + get uint32 + put uint32 + half uint32 + less uint32 + equal uint32 + greater uint32 + miss uint32 +} + +func (p *BufferPool) poolNum(n int) int { + if n <= p.baseline0 && n > p.baseline0/2 { + return 0 + } + for i, x := range p.baseline { + if n <= x { + return i + 1 + } + } + return len(p.baseline) + 1 +} + +// Get returns buffer with length of n. +func (p *BufferPool) Get(n int) []byte { + if p == nil { + return make([]byte, n) + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return make([]byte, n) + } + + atomic.AddUint32(&p.get, 1) + + poolNum := p.poolNum(n) + pool := p.pool[poolNum] + if poolNum == 0 { + // Fast path. 
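// Pool 0 serves requests near baseline0. The non-blocking receive below
// either reuses a pooled buffer (the less/equal counters), returns an
// oversized buffer to the pool and allocates fresh (half), drops an
// undersized one (greater), or misses and allocates with capacity
// baseline0 so the new buffer is poolable on Put.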
+ select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + select { + case pool <- b: + default: + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + } + default: + atomic.AddUint32(&p.miss, 1) + } + + return make([]byte, n, p.baseline0) + } else { + sizePtr := &p.size[poolNum-1] + + select { + case b := <-pool: + switch { + case cap(b) > n: + if cap(b)-n >= n { + atomic.AddUint32(&p.half, 1) + sizeHalfPtr := &p.sizeHalf[poolNum-1] + if atomic.AddUint32(sizeHalfPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) + atomic.StoreUint32(sizeHalfPtr, 0) + } else { + select { + case pool <- b: + default: + } + } + return make([]byte, n) + } else { + atomic.AddUint32(&p.less, 1) + return b[:n] + } + case cap(b) == n: + atomic.AddUint32(&p.equal, 1) + return b[:n] + default: + atomic.AddUint32(&p.greater, 1) + if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { + select { + case pool <- b: + default: + } + } + } + default: + atomic.AddUint32(&p.miss, 1) + } + + if size := atomic.LoadUint32(sizePtr); uint32(n) > size { + if size == 0 { + atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) + } else { + sizeMissPtr := &p.sizeMiss[poolNum-1] + if atomic.AddUint32(sizeMissPtr, 1) == 20 { + atomic.StoreUint32(sizePtr, uint32(n)) + atomic.StoreUint32(sizeMissPtr, 0) + } + } + return make([]byte, n) + } else { + return make([]byte, n, size) + } + } +} + +// Put adds given buffer to the pool. +func (p *BufferPool) Put(b []byte) { + if p == nil { + return + } + + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return + } + + atomic.AddUint32(&p.put, 1) + + pool := p.pool[p.poolNum(cap(b))] + select { + case pool <- b: + default: + } + +} + +func (p *BufferPool) Close() { + if p == nil { + return + } + + p.mu.Lock() + if !p.closed { + p.closed = true + p.closeC <- struct{}{} + } + p.mu.Unlock() +} + +func (p *BufferPool) String() string { + if p == nil { + return "" + } + + return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", + p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) +} + +func (p *BufferPool) drain() { + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + for _, ch := range p.pool { + select { + case <-ch: + default: + } + } + case <-p.closeC: + close(p.closeC) + for _, ch := range p.pool { + close(ch) + } + return + } + } +} + +// NewBufferPool creates a new initialized 'buffer pool'. +func NewBufferPool(baseline int) *BufferPool { + if baseline <= 0 { + panic("baseline can't be <= 0") + } + p := &BufferPool{ + baseline0: baseline, + baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, + closeC: make(chan struct{}, 1), + } + for i, cap := range []int{2, 2, 4, 4, 2, 1} { + p.pool[i] = make(chan []byte, cap) + } + go p.drain() + return p +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go new file mode 100644 index 0000000000..631c9d6109 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go @@ -0,0 +1,30 @@ +// Copyright 2011 The LevelDB-Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package util + +import ( + "hash/crc32" +) + +var table = crc32.MakeTable(crc32.Castagnoli) + +// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. +type CRC uint32 + +// NewCRC creates a new crc based on the given bytes. +func NewCRC(b []byte) CRC { + return CRC(0).Update(b) +} + +// Update updates the crc with the given bytes. +func (c CRC) Update(b []byte) CRC { + return CRC(crc32.Update(uint32(c), table, b)) +} + +// Value returns a masked crc. +func (c CRC) Value() uint32 { + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go new file mode 100644 index 0000000000..7f3fa4e2c7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go @@ -0,0 +1,48 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "encoding/binary" +) + +// Hash return hash of the given data. +func Hash(data []byte, seed uint32) uint32 { + // Similar to murmur hash + const ( + m = uint32(0xc6a4a793) + r = uint32(24) + ) + var ( + h = seed ^ (uint32(len(data)) * m) + i int + ) + + for n := len(data) - len(data)%4; i < n; i += 4 { + h += binary.LittleEndian.Uint32(data[i:]) + h *= m + h ^= (h >> 16) + } + + switch len(data) - i { + default: + panic("not reached") + case 3: + h += uint32(data[i+2]) << 16 + fallthrough + case 2: + h += uint32(data[i+1]) << 8 + fallthrough + case 1: + h += uint32(data[i]) + h *= m + h ^= (h >> r) + case 0: + } + + return h +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go new file mode 100644 index 0000000000..85159583d2 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go @@ -0,0 +1,32 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +// Range is a key range. +type Range struct { + // Start of the key range, include in the range. + Start []byte + + // Limit of the key range, not include in the range. + Limit []byte +} + +// BytesPrefix returns key range that satisfy the given prefix. +// This only applicable for the standard 'bytes comparer'. +func BytesPrefix(prefix []byte) *Range { + var limit []byte + for i := len(prefix) - 1; i >= 0; i-- { + c := prefix[i] + if c < 0xff { + limit = make([]byte, i+1) + copy(limit, prefix) + limit[i] = c + 1 + break + } + } + return &Range{prefix, limit} +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go new file mode 100644 index 0000000000..80614afc58 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go @@ -0,0 +1,73 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package util provides utilities used throughout leveldb. +package util + +import ( + "errors" +) + +var ( + ErrReleased = errors.New("leveldb: resource already relesed") + ErrHasReleaser = errors.New("leveldb: releaser already defined") +) + +// Releaser is the interface that wraps the basic Release method. +type Releaser interface { + // Release releases associated resources. 
Release should always success + // and can be called multiple times without causing error. + Release() +} + +// ReleaseSetter is the interface that wraps the basic SetReleaser method. +type ReleaseSetter interface { + // SetReleaser associates the given releaser to the resources. The + // releaser will be called once coresponding resources released. + // Calling SetReleaser with nil will clear the releaser. + // + // This will panic if a releaser already present or coresponding + // resource is already released. Releaser should be cleared first + // before assigned a new one. + SetReleaser(releaser Releaser) +} + +// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. +type BasicReleaser struct { + releaser Releaser + released bool +} + +// Released returns whether Release method already called. +func (r *BasicReleaser) Released() bool { + return r.released +} + +// Release implements Releaser.Release. +func (r *BasicReleaser) Release() { + if !r.released { + if r.releaser != nil { + r.releaser.Release() + r.releaser = nil + } + r.released = true + } +} + +// SetReleaser implements ReleaseSetter.SetReleaser. +func (r *BasicReleaser) SetReleaser(releaser Releaser) { + if r.released { + panic(ErrReleased) + } + if r.releaser != nil && releaser != nil { + panic(ErrHasReleaser) + } + r.releaser = releaser +} + +type NoopReleaser struct{} + +func (NoopReleaser) Release() {} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go new file mode 100644 index 0000000000..73f272af5f --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go @@ -0,0 +1,528 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type tSet struct { + level int + table *tFile +} + +type version struct { + s *session + + levels []tFiles + + // Level that should be compacted next and its compaction score. + // Score < 1 means compaction is not strictly needed. These fields + // are initialized by computeCompaction() + cLevel int + cScore float64 + + cSeek unsafe.Pointer + + closing bool + ref int + released bool +} + +func newVersion(s *session) *version { + return &version{s: s} +} + +func (v *version) incref() { + if v.released { + panic("already released") + } + + v.ref++ + if v.ref == 1 { + // Incr file ref. + for _, tt := range v.levels { + for _, t := range tt { + v.s.addFileRef(t.fd, 1) + } + } + } +} + +func (v *version) releaseNB() { + v.ref-- + if v.ref > 0 { + return + } else if v.ref < 0 { + panic("negative version ref") + } + + for _, tt := range v.levels { + for _, t := range tt { + if v.s.addFileRef(t.fd, -1) == 0 { + v.s.tops.remove(t) + } + } + } + + v.released = true +} + +func (v *version) release() { + v.s.vmu.Lock() + v.releaseNB() + v.s.vmu.Unlock() +} + +func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) { + ukey := ikey.ukey() + + // Aux level. + if aux != nil { + for _, t := range aux { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(-1, t) { + return + } + } + } + + if lf != nil && !lf(-1) { + return + } + } + + // Walk tables level-by-level. 
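// Level-0 tables may overlap one another, so every overlapping table must
// be visited in order; for level 1 and deeper the tables are disjoint and
// sorted, so a binary search (searchMax) locates the single candidate
// table and one user-key comparison confirms the overlap.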
+ for level, tables := range v.levels { + if len(tables) == 0 { + continue + } + + if level == 0 { + // Level-0 files may overlap each other. Find all files that + // overlap ukey. + for _, t := range tables { + if t.overlaps(v.s.icmp, ukey, ukey) { + if !f(level, t) { + return + } + } + } + } else { + if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { + t := tables[i] + if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { + if !f(level, t) { + return + } + } + } + } + + if lf != nil && !lf(level) { + return + } + } +} + +func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { + if v.closing { + return nil, false, ErrClosed + } + + ukey := ikey.ukey() + + var ( + tset *tSet + tseek bool + + // Level-0. + zfound bool + zseq uint64 + zkt keyType + zval []byte + ) + + err = ErrNotFound + + // Since entries never hop across level, finding key/value + // in smaller level make later levels irrelevant. + v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool { + if level >= 0 && !tseek { + if tset == nil { + tset = &tSet{level, t} + } else { + tseek = true + } + } + + var ( + fikey, fval []byte + ferr error + ) + if noValue { + fikey, ferr = v.s.tops.findKey(t, ikey, ro) + } else { + fikey, fval, ferr = v.s.tops.find(t, ikey, ro) + } + + switch ferr { + case nil: + case ErrNotFound: + return true + default: + err = ferr + return false + } + + if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil { + if v.s.icmp.uCompare(ukey, fukey) == 0 { + // Level <= 0 may overlaps each-other. + if level <= 0 { + if fseq >= zseq { + zfound = true + zseq = fseq + zkt = fkt + zval = fval + } + } else { + switch fkt { + case keyTypeVal: + value = fval + err = nil + case keyTypeDel: + default: + panic("leveldb: invalid internalKey type") + } + return false + } + } + } else { + err = fkerr + return false + } + + return true + }, func(level int) bool { + if zfound { + switch zkt { + case keyTypeVal: + value = zval + err = nil + case keyTypeDel: + default: + panic("leveldb: invalid internalKey type") + } + return false + } + + return true + }) + + if tseek && tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + + return +} + +func (v *version) sampleSeek(ikey internalKey) (tcomp bool) { + var tset *tSet + + v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool { + if tset == nil { + tset = &tSet{level, t} + return true + } + if tset.table.consumeSeek() <= 0 { + tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) + } + return false + }, nil) + + return +} + +func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { + strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) + for level, tables := range v.levels { + if level == 0 { + // Merge all level zero files together since they may overlap. + for _, t := range tables { + its = append(its, v.s.tops.newIterator(t, slice, ro)) + } + } else if len(tables) != 0 { + its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)) + } + } + return +} + +func (v *version) newStaging() *versionStaging { + return &versionStaging{base: v} +} + +// Spawn a new version based on this version. 
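// The spawned version is copy-on-write with respect to its base: levels
// above the highest level touched by the session record share their table
// slices with the base version, and the base itself is never mutated (see
// versionStaging.finish).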
+func (v *version) spawn(r *sessionRecord) *version { + staging := v.newStaging() + staging.commit(r) + return staging.finish() +} + +func (v *version) fillRecord(r *sessionRecord) { + for level, tables := range v.levels { + for _, t := range tables { + r.addTableFile(level, t) + } + } +} + +func (v *version) tLen(level int) int { + if level < len(v.levels) { + return len(v.levels[level]) + } + return 0 +} + +func (v *version) offsetOf(ikey internalKey) (n int64, err error) { + for level, tables := range v.levels { + for _, t := range tables { + if v.s.icmp.Compare(t.imax, ikey) <= 0 { + // Entire file is before "ikey", so just add the file size + n += t.size + } else if v.s.icmp.Compare(t.imin, ikey) > 0 { + // Entire file is after "ikey", so ignore + if level > 0 { + // Files other than level 0 are sorted by meta->min, so + // no further files in this level will contain data for + // "ikey". + break + } + } else { + // "ikey" falls in the range for this table. Add the + // approximate offset of "ikey" within the table. + if m, err := v.s.tops.offsetOf(t, ikey); err == nil { + n += m + } else { + return 0, err + } + } + } + } + + return +} + +func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) { + if maxLevel > 0 { + if len(v.levels) == 0 { + return maxLevel + } + if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) { + var overlaps tFiles + for ; level < maxLevel; level++ { + if pLevel := level + 1; pLevel >= len(v.levels) { + return maxLevel + } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) { + break + } + if gpLevel := level + 2; gpLevel < len(v.levels) { + overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false) + if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) { + break + } + } + } + } + } + return +} + +func (v *version) computeCompaction() { + // Precomputed best level for next compaction + bestLevel := int(-1) + bestScore := float64(-1) + + statFiles := make([]int, len(v.levels)) + statSizes := make([]string, len(v.levels)) + statScore := make([]string, len(v.levels)) + statTotSize := int64(0) + + for level, tables := range v.levels { + var score float64 + size := tables.size() + if level == 0 { + // We treat level-0 specially by bounding the number of files + // instead of number of bytes for two reasons: + // + // (1) With larger write-buffer sizes, it is nice not to do too + // many level-0 compaction. + // + // (2) The files in level-0 are merged on every read and + // therefore we wish to avoid too many files when the individual + // file size is small (perhaps because of a small write-buffer + // setting, or very high compression ratios, or lots of + // overwrites/deletions). 
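// As a worked example (assuming goleveldb's default level-0 trigger of 4,
// opt.DefaultCompactionL0Trigger): six level-0 tables score 6/4 = 1.5, so
// level 0 is picked for compaction even when every deeper level is under
// its size budget (score < 1).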
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) + } else { + score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level)) + } + + if score > bestScore { + bestLevel = level + bestScore = score + } + + statFiles[level] = len(tables) + statSizes[level] = shortenb(int(size)) + statScore[level] = fmt.Sprintf("%.2f", score) + statTotSize += size + } + + v.cLevel = bestLevel + v.cScore = bestScore + + v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) +} + +func (v *version) needCompaction() bool { + return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil +} + +type tablesScratch struct { + added map[int64]atRecord + deleted map[int64]struct{} +} + +type versionStaging struct { + base *version + levels []tablesScratch +} + +func (p *versionStaging) getScratch(level int) *tablesScratch { + if level >= len(p.levels) { + newLevels := make([]tablesScratch, level+1) + copy(newLevels, p.levels) + p.levels = newLevels + } + return &(p.levels[level]) +} + +func (p *versionStaging) commit(r *sessionRecord) { + // Deleted tables. + for _, r := range r.deletedTables { + scratch := p.getScratch(r.level) + if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 { + if scratch.deleted == nil { + scratch.deleted = make(map[int64]struct{}) + } + scratch.deleted[r.num] = struct{}{} + } + if scratch.added != nil { + delete(scratch.added, r.num) + } + } + + // New tables. + for _, r := range r.addedTables { + scratch := p.getScratch(r.level) + if scratch.added == nil { + scratch.added = make(map[int64]atRecord) + } + scratch.added[r.num] = r + if scratch.deleted != nil { + delete(scratch.deleted, r.num) + } + } +} + +func (p *versionStaging) finish() *version { + // Build new version. + nv := newVersion(p.base.s) + numLevel := len(p.levels) + if len(p.base.levels) > numLevel { + numLevel = len(p.base.levels) + } + nv.levels = make([]tFiles, numLevel) + for level := 0; level < numLevel; level++ { + var baseTabels tFiles + if level < len(p.base.levels) { + baseTabels = p.base.levels[level] + } + + if level < len(p.levels) { + scratch := p.levels[level] + + var nt tFiles + // Prealloc list if possible. + if n := len(baseTabels) + len(scratch.added) - len(scratch.deleted); n > 0 { + nt = make(tFiles, 0, n) + } + + // Base tables. + for _, t := range baseTabels { + if _, ok := scratch.deleted[t.fd.Num]; ok { + continue + } + if _, ok := scratch.added[t.fd.Num]; ok { + continue + } + nt = append(nt, t) + } + + // New tables. + for _, r := range scratch.added { + nt = append(nt, tableFileFromRecord(r)) + } + + if len(nt) != 0 { + // Sort tables. + if level == 0 { + nt.sortByNum() + } else { + nt.sortByKey(p.base.s.icmp) + } + + nv.levels[level] = nt + } + } else { + nv.levels[level] = baseTabels + } + } + + // Trim levels. + n := len(nv.levels) + for ; n > 0 && nv.levels[n-1] == nil; n-- { + } + nv.levels = nv.levels[:n] + + // Compute compaction score for new version. 
+ nv.computeCompaction() + + return nv +} + +type versionReleaser struct { + v *version + once bool +} + +func (vr *versionReleaser) Release() { + v := vr.v + v.s.vmu.Lock() + if !vr.once { + v.releaseNB() + vr.once = true + } + v.s.vmu.Unlock() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 851ad510de..caa4a434af 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -326,9 +326,30 @@ github.com/casbin/casbin/persist/file-adapter github.com/casbin/casbin/rbac github.com/casbin/casbin/rbac/default-role-manager github.com/casbin/casbin/util +# github.com/casbin/casbin/v2 v2.97.0 +## explicit; go 1.13 +github.com/casbin/casbin/v2 +github.com/casbin/casbin/v2/config +github.com/casbin/casbin/v2/constant +github.com/casbin/casbin/v2/effector +github.com/casbin/casbin/v2/errors +github.com/casbin/casbin/v2/log +github.com/casbin/casbin/v2/model +github.com/casbin/casbin/v2/persist +github.com/casbin/casbin/v2/persist/cache +github.com/casbin/casbin/v2/persist/file-adapter +github.com/casbin/casbin/v2/rbac +github.com/casbin/casbin/v2/rbac/default-role-manager +github.com/casbin/casbin/v2/util +# github.com/casbin/govaluate v1.1.0 +## explicit; go 1.13 +github.com/casbin/govaluate # github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007 ## explicit github.com/casbin/xorm-adapter +# github.com/casbin/xorm-adapter/v2 v2.5.1 +## explicit; go 1.12 +github.com/casbin/xorm-adapter/v2 # github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 @@ -600,6 +621,9 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers +# github.com/golang/snappy v0.0.4 +## explicit +github.com/golang/snappy # github.com/google/btree v1.1.2 ## explicit; go 1.18 github.com/google/btree @@ -1000,6 +1024,20 @@ github.com/stretchr/objx ## explicit; go 1.20 github.com/stretchr/testify/assert github.com/stretchr/testify/mock +# github.com/syndtr/goleveldb v1.0.0 +## explicit +github.com/syndtr/goleveldb/leveldb +github.com/syndtr/goleveldb/leveldb/cache +github.com/syndtr/goleveldb/leveldb/comparer +github.com/syndtr/goleveldb/leveldb/errors +github.com/syndtr/goleveldb/leveldb/filter +github.com/syndtr/goleveldb/leveldb/iterator +github.com/syndtr/goleveldb/leveldb/journal +github.com/syndtr/goleveldb/leveldb/memdb +github.com/syndtr/goleveldb/leveldb/opt +github.com/syndtr/goleveldb/leveldb/storage +github.com/syndtr/goleveldb/leveldb/table +github.com/syndtr/goleveldb/leveldb/util # github.com/tidwall/gjson v1.14.3 ## explicit; go 1.12 github.com/tidwall/gjson @@ -2257,12 +2295,27 @@ upper.io/db.v3/lib/reflectx upper.io/db.v3/lib/sqlbuilder upper.io/db.v3/mysql upper.io/db.v3/postgresql -# xorm.io/builder v0.3.6 +# xorm.io/builder v0.3.7 ## explicit; go 1.11 xorm.io/builder # xorm.io/core v0.7.2 ## explicit xorm.io/core +# xorm.io/xorm v1.0.3 +## explicit; go 1.11 +xorm.io/xorm +xorm.io/xorm/caches +xorm.io/xorm/contexts +xorm.io/xorm/convert +xorm.io/xorm/core +xorm.io/xorm/dialects +xorm.io/xorm/internal/json +xorm.io/xorm/internal/statements +xorm.io/xorm/internal/utils +xorm.io/xorm/log +xorm.io/xorm/names +xorm.io/xorm/schemas +xorm.io/xorm/tags # github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 # github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.7-v3refs # k8s.io/api => k8s.io/api v0.26.11 diff --git a/vendor/xorm.io/builder/.drone.yml b/vendor/xorm.io/builder/.drone.yml index 
557dbf66f0..61d323d5f6 100644 --- a/vendor/xorm.io/builder/.drone.yml +++ b/vendor/xorm.io/builder/.drone.yml @@ -1,31 +1,6 @@ --- kind: pipeline -name: go1.10 - -workspace: - base: /go - path: src/xorm.io/builder - -steps: -- name: test - pull: default - image: golang:1.10 - commands: - - go get -u golang.org/x/lint/golint - - go get -u github.com/stretchr/testify/assert - - go get -u github.com/go-xorm/sqlfiddle - - golint ./... - - go vet - - go test -v -race -coverprofile=coverage.txt -covermode=atomic - when: - event: - - push - - tag - - pull_request - ---- -kind: pipeline -name: go1.11 +name: testing steps: - name: test @@ -36,50 +11,6 @@ steps: - golint ./... - go vet - go test -v -race -coverprofile=coverage.txt -covermode=atomic - environment: - GOPROXY: https://goproxy.cn - GO111MODULE: "on" - when: - event: - - push - - tag - - pull_request - ---- -kind: pipeline -name: go1.12 - -steps: -- name: test - pull: default - image: golang:1.12 - commands: - - go get -u golang.org/x/lint/golint - - golint ./... - - go vet - - go test -v -race -coverprofile=coverage.txt -covermode=atomic - environment: - GOPROXY: https://goproxy.cn - GO111MODULE: "on" - when: - event: - - push - - tag - - pull_request - ---- -kind: pipeline -name: go1.13 - -steps: -- name: test - pull: default - image: golang:1.13 - commands: - - go get -u golang.org/x/lint/golint - - golint ./... - - go vet - - go test -v -race -coverprofile=coverage.txt -covermode=atomic environment: GOPROXY: https://goproxy.cn GO111MODULE: "on" diff --git a/vendor/xorm.io/builder/.gitignore b/vendor/xorm.io/builder/.gitignore new file mode 100644 index 0000000000..723ef36f4e --- /dev/null +++ b/vendor/xorm.io/builder/.gitignore @@ -0,0 +1 @@ +.idea \ No newline at end of file diff --git a/vendor/xorm.io/builder/builder.go b/vendor/xorm.io/builder/builder.go index 4f14222843..cccc8a7fd9 100644 --- a/vendor/xorm.io/builder/builder.go +++ b/vendor/xorm.io/builder/builder.go @@ -17,7 +17,7 @@ const ( insertType // insert updateType // update deleteType // delete - unionType // union + setOpType // set operation ) // all databasees @@ -27,6 +27,10 @@ const ( MYSQL = "mysql" MSSQL = "mssql" ORACLE = "oracle" + + UNION = "union" + INTERSECT = "intersect" + EXCEPT = "except" ) type join struct { @@ -35,9 +39,10 @@ type join struct { joinCond Cond } -type union struct { - unionType string - builder *Builder +type setOp struct { + opType string + distinctType string + builder *Builder } type limit struct { @@ -56,7 +61,7 @@ type Builder struct { cond Cond selects []string joins []join - unions []union + setOps []setOp limitation *limit insertCols []string insertVals []interface{} @@ -144,33 +149,48 @@ func (b *Builder) Into(tableName string) *Builder { } // Union sets union conditions -func (b *Builder) Union(unionTp string, unionCond *Builder) *Builder { +func (b *Builder) Union(distinctType string, cond *Builder) *Builder { + return b.setOperation(UNION, distinctType, cond) +} + +// Intersect sets intersect conditions +func (b *Builder) Intersect(distinctType string, cond *Builder) *Builder { + return b.setOperation(INTERSECT, distinctType, cond) +} + +// Except sets except conditions +func (b *Builder) Except(distinctType string, cond *Builder) *Builder { + return b.setOperation(EXCEPT, distinctType, cond) +} + +func (b *Builder) setOperation(opType, distinctType string, cond *Builder) *Builder { + var builder *Builder - if b.optype != unionType { + if b.optype != setOpType { builder = &Builder{cond: NewCond()} - builder.optype = unionType 
+ builder.optype = setOpType builder.dialect = b.dialect builder.selects = b.selects - currentUnions := b.unions - // erase sub unions (actually append to new Builder.unions) - b.unions = nil + currentSetOps := b.setOps + // erase sub setOps (actually append to new Builder.unions) + b.setOps = nil - for e := range currentUnions { - currentUnions[e].builder.dialect = b.dialect + for e := range currentSetOps { + currentSetOps[e].builder.dialect = b.dialect } - builder.unions = append(append(builder.unions, union{"", b}), currentUnions...) + builder.setOps = append(append(builder.setOps, setOp{opType, "", b}), currentSetOps...) } else { builder = b } - if unionCond != nil { - if unionCond.dialect == "" && builder.dialect != "" { - unionCond.dialect = builder.dialect + if cond != nil { + if cond.dialect == "" && builder.dialect != "" { + cond.dialect = builder.dialect } - builder.unions = append(builder.unions, union{unionTp, unionCond}) + builder.setOps = append(builder.setOps, setOp{opType, distinctType, cond}) } return builder @@ -240,8 +260,8 @@ func (b *Builder) WriteTo(w Writer) error { return b.updateWriteTo(w) case deleteType: return b.deleteWriteTo(w) - case unionType: - return b.unionWriteTo(w) + case setOpType: + return b.setOpWriteTo(w) } return ErrNotSupportType diff --git a/vendor/xorm.io/builder/builder_insert.go b/vendor/xorm.io/builder/builder_insert.go index 9558a8acad..8cef5c56c6 100644 --- a/vendor/xorm.io/builder/builder_insert.go +++ b/vendor/xorm.io/builder/builder_insert.go @@ -58,6 +58,8 @@ func (b *Builder) insertWriteTo(w Writer) error { if e, ok := value.(expr); ok { fmt.Fprintf(valBuffer, "(%s)", e.sql) args = append(args, e.args...) + } else if value == nil { + fmt.Fprintf(valBuffer, `null`) } else { fmt.Fprint(valBuffer, "?") args = append(args, value) diff --git a/vendor/xorm.io/builder/builder_limit.go b/vendor/xorm.io/builder/builder_limit.go index 82435dacbd..82e1179367 100644 --- a/vendor/xorm.io/builder/builder_limit.go +++ b/vendor/xorm.io/builder/builder_limit.go @@ -21,6 +21,9 @@ func (b *Builder) limitWriteTo(w Writer) error { } // erase limit condition b.limitation = nil + defer func() { + b.limitation = limit + }() ow := w.(*BytesWriter) switch strings.ToLower(strings.TrimSpace(b.dialect)) { @@ -34,7 +37,7 @@ func (b *Builder) limitWriteTo(w Writer) error { b.selects = append(selects, "ROWNUM RN") var wb *Builder - if b.optype == unionType { + if b.optype == setOpType { wb = Dialect(b.dialect).Select("at.*", "ROWNUM RN"). From(b, "at") } else { @@ -55,7 +58,7 @@ func (b *Builder) limitWriteTo(w Writer) error { return final.WriteTo(ow) case SQLITE, MYSQL, POSTGRES: // if type UNION, we need to write previous content back to current writer - if b.optype == unionType { + if b.optype == setOpType { if err := b.WriteTo(ow); err != nil { return err } @@ -77,7 +80,7 @@ func (b *Builder) limitWriteTo(w Writer) error { b.selects[1:]...), "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN") var wb *Builder - if b.optype == unionType { + if b.optype == setOpType { wb = Dialect(b.dialect).Select("*", "ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RN"). 
From(b, "at") } else { diff --git a/vendor/xorm.io/builder/builder_select.go b/vendor/xorm.io/builder/builder_select.go index 814d1b5afb..087a71d870 100644 --- a/vendor/xorm.io/builder/builder_select.go +++ b/vendor/xorm.io/builder/builder_select.go @@ -63,7 +63,7 @@ func (b *Builder) selectWriteTo(w Writer) error { } switch b.subQuery.optype { - case selectType, unionType: + case selectType, setOpType: fmt.Fprint(w, " FROM (") if err := b.subQuery.WriteTo(w); err != nil { return err diff --git a/vendor/xorm.io/builder/builder_union.go b/vendor/xorm.io/builder/builder_set_operations.go similarity index 69% rename from vendor/xorm.io/builder/builder_union.go rename to vendor/xorm.io/builder/builder_set_operations.go index 4ba9216178..b2b4a3daf9 100644 --- a/vendor/xorm.io/builder/builder_union.go +++ b/vendor/xorm.io/builder/builder_set_operations.go @@ -9,19 +9,19 @@ import ( "strings" ) -func (b *Builder) unionWriteTo(w Writer) error { +func (b *Builder) setOpWriteTo(w Writer) error { if b.limitation != nil || b.cond.IsValid() || b.orderBy != "" || b.having != "" || b.groupBy != "" { return ErrNotUnexpectedUnionConditions } - for idx, u := range b.unions { - current := u.builder + for idx, o := range b.setOps { + current := o.builder if current.optype != selectType { return ErrUnsupportedUnionMembers } - if len(b.unions) == 1 { + if len(b.setOps) == 1 { if err := current.selectWriteTo(w); err != nil { return err } @@ -31,7 +31,11 @@ func (b *Builder) unionWriteTo(w Writer) error { } if idx != 0 { - fmt.Fprint(w, fmt.Sprintf(" UNION %v ", strings.ToUpper(u.unionType))) + if o.distinctType == "" { + fmt.Fprint(w, fmt.Sprintf(" %s ", strings.ToUpper(o.opType))) + } else { + fmt.Fprint(w, fmt.Sprintf(" %s %s ", strings.ToUpper(o.opType), strings.ToUpper(o.distinctType))) + } } fmt.Fprint(w, "(") diff --git a/vendor/xorm.io/builder/builder_update.go b/vendor/xorm.io/builder/builder_update.go index 9b6e10bc95..5fffbe3497 100644 --- a/vendor/xorm.io/builder/builder_update.go +++ b/vendor/xorm.io/builder/builder_update.go @@ -45,6 +45,10 @@ func (b *Builder) updateWriteTo(w Writer) error { } } + if !b.cond.IsValid() { + return nil + } + if _, err := fmt.Fprint(w, " WHERE "); err != nil { return err } diff --git a/vendor/xorm.io/builder/cond_eq.go b/vendor/xorm.io/builder/cond_eq.go index 32f04d5d4a..9976d1804c 100644 --- a/vendor/xorm.io/builder/cond_eq.go +++ b/vendor/xorm.io/builder/cond_eq.go @@ -64,6 +64,10 @@ func (eq Eq) OpWriteTo(op string, w Writer) error { return err } w.Append(int(v.(Decr))) + case nil: + if _, err := fmt.Fprintf(w, "%s=null", k); err != nil { + return err + } default: if _, err := fmt.Fprintf(w, "%s=?", k); err != nil { return err diff --git a/vendor/xorm.io/builder/sql.go b/vendor/xorm.io/builder/sql.go index 4250fea18d..a6d1066b82 100644 --- a/vendor/xorm.io/builder/sql.go +++ b/vendor/xorm.io/builder/sql.go @@ -75,6 +75,7 @@ func noSQLQuoteNeeded(a interface{}) bool { } t := reflect.TypeOf(a) + switch t.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return true @@ -133,12 +134,16 @@ func ConvertToBoundSQL(sql string, args []interface{}) (string, error) { return buf.String(), nil } -// ConvertPlaceholder replaces ? to $1, $2 ... or :1, :2 ... according prefix +// ConvertPlaceholder replaces the place holder ? to $1, $2 ... or :1, :2 ... according prefix func ConvertPlaceholder(sql, prefix string) (string, error) { buf := strings.Builder{} var i, j, start int + var ready = true for ; i < len(sql); i++ { - if sql[i] == '?' 
{ + if sql[i] == '\'' && i > 0 && sql[i-1] != '\\' { + ready = !ready + } + if ready && sql[i] == '?' { if _, err := buf.WriteString(sql[start:i]); err != nil { return "", err } diff --git a/vendor/xorm.io/xorm/.changelog.yml b/vendor/xorm.io/xorm/.changelog.yml new file mode 100644 index 0000000000..1303c9cc93 --- /dev/null +++ b/vendor/xorm.io/xorm/.changelog.yml @@ -0,0 +1,53 @@ +# The full repository name +repo: xorm/xorm + +# Service type (gitea or github) +service: gitea + +# Base URL for Gitea instance if using gitea service type (optional) +# Default: https://gitea.com +base-url: + +# Changelog groups and which labeled PRs to add to each group +groups: + - + name: BREAKING + labels: + - kind/breaking + - + name: FEATURES + labels: + - kind/feature + - + name: SECURITY + labels: + - kind/security + - + name: BUGFIXES + labels: + - kind/bug + - + name: ENHANCEMENTS + labels: + - kind/enhancement + - kind/refactor + - kind/ui + - + name: TESTING + labels: + - kind/testing + - + name: BUILD + labels: + - kind/build + - kind/lint + - + name: DOCS + labels: + - kind/docs + - + name: MISC + default: true + +# regex indicating which labels to skip for the changelog +skip-labels: skip-changelog|backport\/.+ diff --git a/vendor/xorm.io/xorm/.drone.yml b/vendor/xorm.io/xorm/.drone.yml new file mode 100644 index 0000000000..7a18e0d683 --- /dev/null +++ b/vendor/xorm.io/xorm/.drone.yml @@ -0,0 +1,304 @@ +--- +kind: pipeline +name: testing +steps: +- name: test-vet + image: golang:1.11 # The lowest golang requirement + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + commands: + - make vet + - make test + when: + event: + - push + - pull_request + +- name: test-sqlite + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + commands: + - make test-sqlite + - TEST_CACHE_ENABLE=true make test-sqlite + - TEST_QUOTE_POLICY=reserved make test-sqlite + when: + event: + - push + - pull_request + +- name: test-mysql + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql + TEST_MYSQL_CHARSET: utf8 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mysql8 + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql8 + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mysql-utf8mb4 + image: golang:1.12 + depends_on: + - test-mysql + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql + TEST_MYSQL_CHARSET: utf8mb4 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mysql + - TEST_CACHE_ENABLE=true make test-mysql + - TEST_QUOTE_POLICY=reserved make test-mysql + when: + event: + - push + - pull_request + +- name: test-mymysql + pull: default + image: golang:1.12 + depends_on: + - test-mysql-utf8mb4 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MYSQL_HOST: mysql:3306 + TEST_MYSQL_DBNAME: xorm_test + TEST_MYSQL_USERNAME: root + TEST_MYSQL_PASSWORD: + commands: + - make test-mymysql + - 
TEST_CACHE_ENABLE=true make test-mymysql + - TEST_QUOTE_POLICY=reserved make test-mymysql + when: + event: + - push + - pull_request + +- name: test-postgres + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_PGSQL_HOST: pgsql + TEST_PGSQL_DBNAME: xorm_test + TEST_PGSQL_USERNAME: postgres + TEST_PGSQL_PASSWORD: postgres + commands: + - make test-postgres + - TEST_CACHE_ENABLE=true make test-postgres + - TEST_QUOTE_POLICY=reserved make test-postgres + when: + event: + - push + - pull_request + +- name: test-postgres-schema + pull: default + image: golang:1.12 + depends_on: + - test-postgres + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_PGSQL_HOST: pgsql + TEST_PGSQL_SCHEMA: xorm + TEST_PGSQL_DBNAME: xorm_test + TEST_PGSQL_USERNAME: postgres + TEST_PGSQL_PASSWORD: postgres + commands: + - make test-postgres + - TEST_CACHE_ENABLE=true make test-postgres + - TEST_QUOTE_POLICY=reserved make test-postgres + when: + event: + - push + - pull_request + +- name: test-mssql + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_MSSQL_HOST: mssql + TEST_MSSQL_DBNAME: xorm_test + TEST_MSSQL_USERNAME: sa + TEST_MSSQL_PASSWORD: "yourStrong(!)Password" + commands: + - make test-mssql + - TEST_CACHE_ENABLE=true make test-mssql + - TEST_QUOTE_POLICY=reserved make test-mssql + when: + event: + - push + - pull_request + +- name: test-tidb + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_TIDB_HOST: "tidb:4000" + TEST_TIDB_DBNAME: xorm_test + TEST_TIDB_USERNAME: root + TEST_TIDB_PASSWORD: + commands: + - make test-tidb + - TEST_CACHE_ENABLE=true make test-tidb + - TEST_QUOTE_POLICY=reserved make test-tidb + when: + event: + - push + - pull_request + +- name: test-cockroach + pull: default + image: golang:1.13 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + TEST_COCKROACH_HOST: "cockroach:26257" + TEST_COCKROACH_DBNAME: xorm_test + TEST_COCKROACH_USERNAME: root + TEST_COCKROACH_PASSWORD: + commands: + - sleep 10 + - make test-cockroach + - TEST_CACHE_ENABLE=true make test-cockroach + when: + event: + - push + - pull_request + +- name: merge_coverage + pull: default + image: golang:1.12 + environment: + GO111MODULE: "on" + GOPROXY: "https://goproxy.cn" + depends_on: + - test-vet + - test-sqlite + - test-mysql + - test-mysql8 + - test-mymysql + - test-postgres + - test-postgres-schema + - test-mssql + - test-tidb + - test-cockroach + commands: + - make coverage + when: + event: + - push + - pull_request + +services: + +- name: mysql + pull: default + image: mysql:5.7 + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: yes + MYSQL_DATABASE: xorm_test + when: + event: + - push + - tag + - pull_request + +- name: mysql8 + pull: default + image: mysql:8.0 + environment: + MYSQL_ALLOW_EMPTY_PASSWORD: yes + MYSQL_DATABASE: xorm_test + when: + event: + - push + - tag + - pull_request + +- name: pgsql + pull: default + image: postgres:9.5 + environment: + POSTGRES_DB: xorm_test + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + when: + event: + - push + - tag + - pull_request + +- name: mssql + pull: default + image: microsoft/mssql-server-linux:latest + environment: + ACCEPT_EULA: Y + SA_PASSWORD: yourStrong(!)Password + MSSQL_PID: Developer + when: + event: + - push + - tag + - pull_request + +- name: tidb + pull: default + image: pingcap/tidb:v3.0.3 + when: + event: + - push + - tag + - 
pull_request + +- name: cockroach + pull: default + image: cockroachdb/cockroach:v19.2.4 + commands: + - /cockroach/cockroach start --insecure + when: + event: + - push + - tag + - pull_request diff --git a/vendor/xorm.io/xorm/.gitignore b/vendor/xorm.io/xorm/.gitignore new file mode 100644 index 0000000000..617d5da727 --- /dev/null +++ b/vendor/xorm.io/xorm/.gitignore @@ -0,0 +1,38 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.db + +# Folders +_obj +_test +vendor/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*.log +.vendor +temp_test.go +.vscode +xorm.test +*.sqlite3 +test.db.sql + +.idea/ + +*coverage.out +test.db +integrations/*.sql diff --git a/vendor/xorm.io/xorm/.revive.toml b/vendor/xorm.io/xorm/.revive.toml new file mode 100644 index 0000000000..64e223bbfa --- /dev/null +++ b/vendor/xorm.io/xorm/.revive.toml @@ -0,0 +1,25 @@ +ignoreGeneratedHeader = false +severity = "warning" +confidence = 0.8 +errorCode = 1 +warningCode = 1 + +[rule.blank-imports] +[rule.context-as-argument] +[rule.context-keys-type] +[rule.dot-imports] +[rule.error-return] +[rule.error-strings] +[rule.error-naming] +[rule.exported] +[rule.if-return] +[rule.increment-decrement] +[rule.var-naming] +[rule.var-declaration] +[rule.package-comments] +[rule.range] +[rule.receiver-naming] +[rule.time-naming] +[rule.unexported-return] +[rule.indent-error-flow] +[rule.errorf] \ No newline at end of file diff --git a/vendor/xorm.io/xorm/CHANGELOG.md b/vendor/xorm.io/xorm/CHANGELOG.md new file mode 100644 index 0000000000..fa0259bc73 --- /dev/null +++ b/vendor/xorm.io/xorm/CHANGELOG.md @@ -0,0 +1,205 @@ +# Changelog + +This changelog goes through all the changes that have been made in each release +without substantial changes to our git log. + +## [1.0.3](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1281) - 2020-07-10 + +* BUGFIXES + * Fix dump of sqlite (#1639) +* ENHANCEMENTS + * Fix index name parsing in SQLite dialect (#1737) + * add hooks for Commit and Rollback (#1733) + +## [1.0.2](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1261) - 2020-06-16 + +* FEATURES + * Add Hook (#1644) +* BUGFIXES + * Fix bug when ID used but no reference table given (#1709) + * Fix find and count bug (#1651) +* ENHANCEMENTS + * chore: improve snakeCasedName performance (#1688) + * Fix find with another struct (#1666) + * fix GetColumns missing ordinal position (#1660) +* MISC + * chore: improve titleCasedName performance (#1691) + +## [1.0.1](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1253) - 2020-03-25 + +* BUGFIXES + * Oracle : Local Naming Method (#1515) + * Fix find and count bug (#1618) + * Fix duplicated deleted condition on FindAndCount (#1619) + * Fix find and count bug with cache (#1622) + * Fix postgres schema problem (#1624) + * Fix quote with blank (#1626) + +## [1.0.0](https://gitea.com/xorm/xorm/pulls?q=&type=all&state=closed&milestone=1242) - 2020-03-22 + +* BREAKING + * Add context for dialects (#1558) + * Move zero functions to a standalone package (#1548) + * Merge core package back into the main repository and split into serval sub packages. 
(#1543) +* FEATURES + * Use a new ContextLogger interface to implement logger (#1557) +* BUGFIXES + * Fix setschema (#1606) + * Fix dump/import bug (#1603) + * Fix pk bug (#1602) + * Fix master/slave bug (#1601) + * Fix bug when dump (#1597) + * Ignore schema when dbtype is not postgres (#1593) + * Fix table name (#1590) + * Fix find alias bug (#1581) + * Fix rows bug (#1576) + * Fix map with cols (#1575) + * Fix bug on deleted with join (#1570) + * Improve quote policy (#1567) + * Fix break session sql enable feature (#1566) + * Fix mssql quote (#1535) + * Fix join table name quote bug (#1534) + * Fix mssql issue with duplicate columns. (#1225) + * Fix mysql8.0 sync failed (#808) +* ENHANCEMENTS + * Fix batch insert interface slice be panic (#1598) + * Move some codes to statement sub package (#1574) + * Remove circle file (#1569) + * Move statement as a sub package (#1564) + * Move maptype to tag parser (#1561) + * Move caches to manager (#1553) + * Improve code (#1552) + * Improve some codes (#1551) + * Improve statement (#1549) + * Move tag parser related codes as a standalone sub package (#1547) + * Move reserve words related files into dialects sub package (#1544) + * Fix `Conversion` method `ToDB() ([]byte, error)` return type is nil (#1296) + * Check driver.Valuer response, and skip the column if nil (#1167) + * Add cockroach support and tests (#896) +* TESTING + * Improve tests (#1572) +* BUILD + * Add changelog file and tool configuration (#1546) +* DOCS + * Fix outdate changelog (#1565) + +## old changelog + +* **v0.6.5** + * Postgres schema support + * vgo support + * Add FindAndCount + * Database special params support via NewEngineWithParams + * Some bugs fixed + +* **v0.6.4** + * Automatic read/write splitting + * Query/QueryString/QueryInterface can be combined with Where/And + * Get supports non-struct variables + * BufferSize on Iterate + * fixed some other bugs. + +* **v0.6.3** + * merged tests into the main project + * add `Exist` function + * add `SumInt` function + * MySQL now supports reading and creating column comments. + * fixed time-related bugs. + * fixed some other bugs. + +* **v0.6.2** + * refactored tag parse methods + * add Scan features to Get + * add QueryString method + +* **v0.4.5** + * many bugs fixed + * extends supports unlimited nesting depth + * Delete Limit support + +* **v0.4.4** + * ql database experimental support + * tidb database experimental support + * sql.NullString and similar field types supported + * select ForUpdate support + * many bugs fixed + +* **v0.4.3** + * Json column type support + * oracle experimental support + * bugs fixed + +* **v0.4.2** + * Transactions now roll back automatically if neither Rollback nor Commit is called. + * Gonic Mapper support + * bugs fixed + +* **v0.4.1** + * deleted tag support for soft delete + * bugs fixed + +* **v0.4.0 RC1** + Changes: + * moved xorm cmd to [github.com/go-xorm/cmd](github.com/go-xorm/cmd) + * refactored general DB operations into a core lib at [github.com/go-xorm/core](https://github.com/go-xorm/core) + * moved tests to [github.com/go-xorm/tests](github.com/go-xorm/tests) + + Improvements: + * Prepared statement cache + * Add Incr API + * Specify Timezone Location + +* **v0.3.2** + Improvements: + * Add AllCols & MustCols function + * Add TableName for custom table name + + Bug Fixes: + * #46 + * #51 + * #53 + * #89 + * #86 + * #92 + +* **v0.3.1** + + Features: + * Support MSSQL DB via ODBC driver ([github.com/lunny/godbc](https://github.com/lunny/godbc)); + * Composite Key, using multiple pk xorm tag + * Added Row() API as an alternative to the Iterate() API for traversing result sets, with usage similar to the sql.Rows type + * ORM structs may declare pointer builtin types as members to allow null DB fields + * Before and After Event processors + + Improvements: + * Allowed int/int32/int64/uint/uint32/uint64/string as Primary Key type + * Performance improvement for Get()/Find()/Iterate() + + +* **v0.2.3** : Improved documents; Optimistic Locking support; Timestamp with time zone support; Mapper changed to tableMapper and columnMapper & added PrefixMapper & SuffixMapper to support custom table or column name prefixes and suffixes; Insert now returns affected, err instead of id, err; Added UseBool & Distinct; + +* **v0.2.2** : Postgres drivers now support lib/pq; Added method Iterate for record-by-record handling; Added SetMaxConns (go1.2+) support; some bugs fixed. + +* **v0.2.1** : Added database reverse tool, now supports generating Go & C++ code, see [Xorm Tool README](https://github.com/go-xorm/xorm/blob/master/xorm/README.md); some bugs fixed. + +* **v0.2.0** : Added cache support, selects are sped up 3~5x; Added SameMapper for same names between struct and table; Added Sync method for automatically adding tables, columns, indexes; + +* **v0.1.9** : Added postgres and mymysql support; Added ` and ? support in Raw SQL even for postgres; Added Cols, StoreEngine, Charset functions; added support for many more column data types, please see [Mapping Rules](#mapping). + +* **v0.1.8** : Added union index and union unique support, please see [Mapping Rules](#mapping). + +* **v0.1.7** : Added IConnectPool interface and the three implementations NoneConnectPool, SysConnectPool, SimpleConnectPool. You can choose one of them; the default is SysConnectPool. You can also customize your own connection pool. struct Engine added a Close method, which should be invoked before system exit. + +* **v0.1.6** : Added conversion interface support; added struct derive support; added single mapping support + +* **v0.1.5** : Added multi-threading support; added Sql() function for struct queries; Get function changed to return an interface; MakeSession and Create are replaced by NewSession and NewEngine. + +* **v0.1.4** : Added simple cascade load support; added support for more data types. + +* **v0.1.3** : Find function now supports both slice and map; Add Table function for multi-table and temporary table support + +* **v0.1.2** : Insert function now supports both struct and slice pointer parameters, batch inserting and auto transaction + +* **v0.1.1** : Add Id, In functions and improved README + +* **v0.1.0** : Initial release. 
\ No newline at end of file diff --git a/vendor/xorm.io/xorm/CONTRIBUTING.md b/vendor/xorm.io/xorm/CONTRIBUTING.md new file mode 100644 index 0000000000..a6925a5c0a --- /dev/null +++ b/vendor/xorm.io/xorm/CONTRIBUTING.md @@ -0,0 +1,87 @@ +## Contributing to xorm + +`xorm` has a backlog of [pull requests](https://help.github.com/articles/using-pull-requests), but contributions are still very +much welcome. You can help with patch review, submitting bug reports, +or adding new functionality. There is no formal style guide, but +please conform to the style of existing code and general Go formatting +conventions when submitting patches. + +* [fork a repo](https://help.github.com/articles/fork-a-repo) +* [creating a pull request](https://help.github.com/articles/creating-a-pull-request) + +### Language + +Since `xorm` is a world-wide open source project, please describe your issues or code changes in English whenever possible. + +### Sign your code with comments +``` +// !! your comments + +e.g., + +// !lunny! this is a comment made by lunny +``` + +### Build xorm and test it locally + +Once you have written some code on your feature branch, you can build and test it locally first. Just + +``` +make build +``` +and +``` +make test +``` + +`make test` is an alias of `make test-sqlite`; it runs the tests against a SQLite database file. Nothing extra is needed except a cgo build environment. + +If you write a new test method, you can run + +``` +make test-sqlite#TestMyNewMethod +``` + +which will run only that specific test method. + +If you want to test against another database, you first have to prepare a running database, and then you can run + +``` +TEST_MYSQL_HOST= TEST_MYSQL_CHARSET= TEST_MYSQL_DBNAME= TEST_MYSQL_USERNAME= TEST_MYSQL_PASSWORD= make test-mysql +``` + +or other databases: +``` +TEST_MSSQL_HOST= TEST_MSSQL_DBNAME= TEST_MSSQL_USERNAME= TEST_MSSQL_PASSWORD= make test-mssql +``` +``` +TEST_PGSQL_HOST= TEST_PGSQL_SCHEMA= TEST_PGSQL_DBNAME= TEST_PGSQL_USERNAME= TEST_PGSQL_PASSWORD= make test-postgres +``` +``` +TEST_TIDB_HOST= TEST_TIDB_DBNAME= TEST_TIDB_USERNAME= TEST_TIDB_PASSWORD= make test-tidb +``` + +And if your branch touches the cache, you can also enable it via `TEST_CACHE_ENABLE=true`. + +### Patch review + +Help review existing open [pull requests](https://help.github.com/articles/using-pull-requests) by commenting on the code or +proposed functionality. + +### Bug reports + +We appreciate any bug reports, but especially ones with self-contained +(doesn't depend on code outside of xorm), minimal (can't be simplified +further) test cases. It's especially helpful if you can submit a pull +request with just the failing test case (you can find example test files like [session_get_test.go](https://gitea.com/xorm/xorm/src/branch/master/session_get_test.go)). + +If you implement a new database interface, you may need to add a test_<database>.sh file. +For example, [test_mysql.sh](https://gitea.com/xorm/xorm/src/branch/master/test_mysql.sh) + +### New functionality + +There are a number of pending patches for new functionality, so +additional feature patches will take a while to merge. Still, patches +are generally reviewed based on usefulness and complexity in addition +to time-in-queue, so if you have a knockout idea, take a shot. Feel +free to open an issue discussing your proposed patch beforehand. 
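For orientation, a minimal self-contained test case of the shape requested above might look like the following sketch. The struct, database file, and assertions are illustrative only (they are not part of xorm's test suite), and it assumes the go-sqlite3 driver and testify, which the xorm tests already use:

```Go
package xormtest

import (
	"testing"

	_ "github.com/mattn/go-sqlite3" // cgo-backed driver, matching the build note above
	"github.com/stretchr/testify/assert"
	"xorm.io/xorm"
)

// GetVar is an illustrative table for the failing case being reported.
type GetVar struct {
	Id   int64
	Name string
}

func TestGetVar(t *testing.T) {
	// Each test sets up its own engine so the case stays self-contained.
	engine, err := xorm.NewEngine("sqlite3", "./test.db?cache=shared&mode=rwc")
	assert.NoError(t, err)
	defer engine.Close()

	assert.NoError(t, engine.Sync2(new(GetVar)))

	_, err = engine.Insert(&GetVar{Name: "test1"})
	assert.NoError(t, err)

	var v GetVar
	has, err := engine.Where("name = ?", "test1").Get(&v)
	assert.NoError(t, err)
	assert.True(t, has) // the assertion demonstrating the bug goes here
}
```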
diff --git a/vendor/xorm.io/xorm/LICENSE b/vendor/xorm.io/xorm/LICENSE new file mode 100644 index 0000000000..84d2ae5386 --- /dev/null +++ b/vendor/xorm.io/xorm/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 - 2015 The Xorm Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/xorm.io/xorm/Makefile b/vendor/xorm.io/xorm/Makefile new file mode 100644 index 0000000000..4cccacd834 --- /dev/null +++ b/vendor/xorm.io/xorm/Makefile @@ -0,0 +1,220 @@ +IMPORT := xorm.io/xorm +export GO111MODULE=on + +GO ?= go +GOFMT ?= gofmt -s +TAGS ?= +SED_INPLACE := sed -i + +GOFILES := $(shell find . -name "*.go" -type f) +INTEGRATION_PACKAGES := xorm.io/xorm/integrations +PACKAGES ?= $(filter-out $(INTEGRATION_PACKAGES),$(shell $(GO) list ./...)) + +TEST_COCKROACH_HOST ?= cockroach:26257 +TEST_COCKROACH_SCHEMA ?= +TEST_COCKROACH_DBNAME ?= xorm_test +TEST_COCKROACH_USERNAME ?= postgres +TEST_COCKROACH_PASSWORD ?= + +TEST_MSSQL_HOST ?= mssql:1433 +TEST_MSSQL_DBNAME ?= gitea +TEST_MSSQL_USERNAME ?= sa +TEST_MSSQL_PASSWORD ?= MwantsaSecurePassword1 + +TEST_MYSQL_HOST ?= mysql:3306 +TEST_MYSQL_CHARSET ?= utf8 +TEST_MYSQL_DBNAME ?= xorm_test +TEST_MYSQL_USERNAME ?= root +TEST_MYSQL_PASSWORD ?= + +TEST_PGSQL_HOST ?= pgsql:5432 +TEST_PGSQL_SCHEMA ?= +TEST_PGSQL_DBNAME ?= xorm_test +TEST_PGSQL_USERNAME ?= postgres +TEST_PGSQL_PASSWORD ?= mysecretpassword + +TEST_TIDB_HOST ?= tidb:4000 +TEST_TIDB_DBNAME ?= xorm_test +TEST_TIDB_USERNAME ?= root +TEST_TIDB_PASSWORD ?= + +TEST_CACHE_ENABLE ?= false +TEST_QUOTE_POLICY ?= always + +.PHONY: all +all: build + +.PHONY: build +build: go-check $(GO_SOURCES) + $(GO) build $(PACKAGES) + +.PHONY: clean +clean: + $(GO) clean -i ./... + rm -rf *.sql *.log test.db *coverage.out coverage.all integrations/*.sql + +.PHONY: coverage +coverage: + @hash gocovmerge > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/wadey/gocovmerge; \ + fi + gocovmerge $(shell find . 
-type f -name "coverage.out") > coverage.all;\ + +.PHONY: fmt +fmt: + $(GOFMT) -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + # get all go files and run go fmt on them + @diff=$$($(GOFMT) -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +.PHONY: go-check +go-check: + $(eval GO_VERSION := $(shell printf "%03d%03d%03d" $(shell go version | grep -Eo '[0-9]+\.?[0-9]+?\.?[0-9]?\s' | tr '.' ' ');)) + @if [ "$(GO_VERSION)" -lt "001011000" ]; then \ + echo "Gitea requires Go 1.11.0 or greater to build. You can get it at https://golang.org/dl/"; \ + exit 1; \ + fi + +.PHONY: help +help: + @echo "Make Routines:" + @echo " - equivalent to \"build\"" + @echo " - build creates the entire project" + @echo " - clean delete integration files and build files but not css and js files" + @echo " - fmt format the code" + @echo " - lint run code linter revive" + @echo " - misspell check if a word is written wrong" + @echo " - test run default unit test" + @echo " - test-cockroach run integration tests for cockroach" + @echo " - test-mysql run integration tests for mysql" + @echo " - test-mssql run integration tests for mssql" + @echo " - test-postgres run integration tests for postgres" + @echo " - test-sqlite run integration tests for sqlite" + @echo " - test-tidb run integration tests for tidb" + @echo " - vet examines Go source code and reports suspicious constructs" + +.PHONY: lint +lint: revive + +.PHONY: revive +revive: + @hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/mgechev/revive; \ + fi + revive -config .revive.toml -exclude=./vendor/... ./... || exit 1 + +.PHONY: misspell +misspell: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -w -i unknwon $(GOFILES) + +.PHONY: misspell-check +misspell-check: + @hash misspell > /dev/null 2>&1; if [ $$? 
-ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -error -i unknwon,destory $(GOFILES) + +.PHONY: test +test: go-check + $(GO) test $(PACKAGES) + +.PHONY: test-cockroach +test-cockroach: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \ + -ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-cockroach\#% +test-cockroach\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_COCKROACH_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_COCKROACH_USERNAME):$(TEST_COCKROACH_PASSWORD)@$(TEST_COCKROACH_HOST)/$(TEST_COCKROACH_DBNAME)?sslmode=disable&experimental_serial_normalization=sql_sequence" \ + -ignore_update_limit=true -coverprofile=cockroach.$(TEST_COCKROACH_SCHEMA).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mssql +test-mssql: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \ + -coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mssql\#% +test-mssql\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mssql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="server=$(TEST_MSSQL_HOST);user id=$(TEST_MSSQL_USERNAME);password=$(TEST_MSSQL_PASSWORD);database=$(TEST_MSSQL_DBNAME)" \ + -coverprofile=mssql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mymysql +test-mymysql: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \ + -coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mymysql\#% +test-mymysql\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mymysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="tcp:$(TEST_MYSQL_HOST)*$(TEST_MYSQL_DBNAME)/$(TEST_MYSQL_USERNAME)/$(TEST_MYSQL_PASSWORD)" \ + -coverprofile=mymysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mysql +test-mysql: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \ + -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-mysql\#% +test-mysql\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -quote=$(TEST_QUOTE_POLICY) \ + -conn_str="$(TEST_MYSQL_USERNAME):$(TEST_MYSQL_PASSWORD)@tcp($(TEST_MYSQL_HOST))/$(TEST_MYSQL_DBNAME)?charset=$(TEST_MYSQL_CHARSET)" \ + -coverprofile=mysql.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-postgres +test-postgres: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-postgres\#% +test-postgres\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=postgres -schema='$(TEST_PGSQL_SCHEMA)' -cache=$(TEST_CACHE_ENABLE) \ + -conn_str="postgres://$(TEST_PGSQL_USERNAME):$(TEST_PGSQL_PASSWORD)@$(TEST_PGSQL_HOST)/$(TEST_PGSQL_DBNAME)?sslmode=disable" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=postgres.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-sqlite +test-sqlite: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-sqlite-schema +test-sqlite-schema: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -schema=xorm -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-sqlite\#% +test-sqlite\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -cache=$(TEST_CACHE_ENABLE) -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=sqlite.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-tidb +test-tidb: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \ + -conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: test-tidb\#% +test-tidb\#%: go-check + $(GO) test $(INTEGRATION_PACKAGES) -v -race -run $* -db=mysql -cache=$(TEST_CACHE_ENABLE) -ignore_select_update=true \ + -conn_str="$(TEST_TIDB_USERNAME):$(TEST_TIDB_PASSWORD)@tcp($(TEST_TIDB_HOST))/$(TEST_TIDB_DBNAME)" \ + -quote=$(TEST_QUOTE_POLICY) -coverprofile=tidb.$(TEST_QUOTE_POLICY).$(TEST_CACHE_ENABLE).coverage.out -covermode=atomic + +.PHONY: vet +vet: + $(GO) vet $(shell $(GO) list ./...) \ No newline at end of file diff --git a/vendor/xorm.io/xorm/README.md b/vendor/xorm.io/xorm/README.md new file mode 100644 index 0000000000..ed866224f9 --- /dev/null +++ b/vendor/xorm.io/xorm/README.md @@ -0,0 +1,482 @@ +# xorm + +[中文](https://gitea.com/xorm/xorm/src/branch/master/README_CN.md) + +Xorm is a simple and powerful ORM for Go. + +[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) + +## Notice + +v1.0.0 has some breaking changes from v0.8.2. + +- Removed some non-Gonic function names `Id` and `Sql`; please use `ID` and `SQL` instead. +- Removed the dependency on `xorm.io/core` and moved the code to `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` and others. +- Renamed some interfaces, e.g. `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. + +## Features + +* Struct <-> Table Mapping Support +* Chainable APIs +* Transaction Support +* Both ORM and raw SQL operation Support +* Sync database schema Support +* Query cache speed-up +* Database Reverse support via [xorm.io/reverse](https://xorm.io/reverse) +* Simple cascade loading support +* Optimistic Locking support +* SQL Builder support via [xorm.io/builder](https://xorm.io/builder) +* Automatic read/write splitting +* Postgres schema support +* Context Cache support +* log/SQLLog context support + +## Drivers Support + +Drivers for Go's sql package which currently support database/sql include: + +* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) + - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) + - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) + +* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) + - [github.com/lib/pq](https://github.com/lib/pq) + +* [SQLite](https://sqlite.org) + - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) + +* MsSql + - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) + +* Oracle + - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (experimental) + +## Installation + + go get xorm.io/xorm + +## Documents + +* [Manual](http://xorm.io/docs) + +* [GoDoc](http://pkg.go.dev/xorm.io/xorm) + +## Quick Start + +* Create Engine + +First, create an engine for a database. + +```Go +engine, err := xorm.NewEngine(driverName, dataSourceName) +``` + +* Define a struct and use Sync2 to sync it to the database + +```Go +type User struct { + Id int64 + Name string + Salt string + Age int + Passwd string `xorm:"varchar(200)"` + Created time.Time `xorm:"created"` + Updated time.Time `xorm:"updated"` +} + +err := engine.Sync2(new(User)) +``` + +* Create Engine Group + +```Go +dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName} +engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice) +``` + +```Go +masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName) +slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName) +slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName) +engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine}) +``` + +Then anywhere you would use `engine`, you can use `engineGroup` instead. + +* `Query` runs a SQL string; the returned result is `[]map[string][]byte`. `QueryString` returns `[]map[string]string`, and `QueryInterface` returns `[]map[string]interface{}`. + +```Go +results, err := engine.Query("select * from user") +results, err := engine.Where("a = 1").Query() + +results, err := engine.QueryString("select * from user") +results, err := engine.Where("a = 1").QueryString() + +results, err := engine.QueryInterface("select * from user") +results, err := engine.Where("a = 1").QueryInterface() +``` + +* `Exec` runs a SQL string; it returns `affected` and `error` + +```Go +affected, err := engine.Exec("update user set age = ? where name = ?", age, name) +``` + +* `Insert` inserts one or more records into the database + +```Go +affected, err := engine.Insert(&user) +// INSERT INTO struct () values () + +affected, err := engine.Insert(&user1, &user2) +// INSERT INTO struct1 () values () +// INSERT INTO struct2 () values () + +affected, err := engine.Insert(&users) +// INSERT INTO struct () values (),(),() + +affected, err := engine.Insert(&user1, &users) +// INSERT INTO struct1 () values () +// INSERT INTO struct2 () values (),(),() +``` + +* `Get` queries one record from the database + +```Go +has, err := engine.Get(&user) +// SELECT * FROM user LIMIT 1 + +has, err := engine.Where("name = ?", name).Desc("id").Get(&user) +// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 + +var name string +has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) +// SELECT name FROM user WHERE id = ? + +var id int64 +has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) +has, err := engine.SQL("select id from user").Get(&id) +// SELECT id FROM user WHERE name = ? + +var valuesMap = make(map[string]string) +has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) +// SELECT * FROM user WHERE id = ? + +var valuesSlice = make([]interface{}, len(cols)) +has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) +// SELECT col1, col2, col3 FROM user WHERE id = ? +``` + +* `Exist` checks whether a record exists in the table + +```Go +has, err := testEngine.Exist(new(RecordExist)) +// SELECT * FROM record_exist LIMIT 1 + +has, err = testEngine.Exist(&RecordExist{ + Name: "test1", + }) +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 + +has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{}) +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 + +has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist() +// select * from record_exist where name = ? + +has, err = testEngine.Table("record_exist").Exist() +// SELECT * FROM record_exist LIMIT 1 + +has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist() +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 +``` + +* `Find` queries multiple records from the database; you can also use join and extends + +```Go +var users []User +err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users) +// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0 + +type Detail struct { + Id int64 + UserId int64 `xorm:"index"` +} + +type UserDetail struct { + User `xorm:"extends"` + Detail `xorm:"extends"` +} + +var users []UserDetail +err := engine.Table("user").Select("user.*, detail.*"). + Join("INNER", "detail", "detail.user_id = user.id"). + Where("user.name = ?", name).Limit(10, 0). + Find(&users) +// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? limit 10 offset 0 +``` + +* `Iterate` and `Rows` query multiple records and handle them record by record + +```Go +err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error { + user := bean.(*User) + return nil +}) +// SELECT * FROM user + +err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error { + user := bean.(*User) + return nil +}) +// SELECT * FROM user Limit 0, 100 +// SELECT * FROM user Limit 101, 100 + +rows, err := engine.Rows(&User{Name:name}) +// SELECT * FROM user +defer rows.Close() +bean := new(Struct) +for rows.Next() { + err = rows.Scan(bean) +} +``` + +* `Update` updates one or more records; by default only non-empty and non-zero fields are updated, unless you use Cols, AllCols and so on. + +```Go +affected, err := engine.ID(1).Update(&user) +// UPDATE user SET ... Where id = ? + +affected, err := engine.Update(&user, &User{Name:name}) +// UPDATE user SET ... Where name = ? + +var ids = []int64{1, 2, 3} +affected, err := engine.In("id", ids).Update(&user) +// UPDATE user SET ... Where id IN (?, ?, ?) + +// force update indicated columns by Cols +affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12}) +// UPDATE user SET age = ?, updated=? Where id = ? + +// force NOT update indicated columns by Omit +affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12}) +// UPDATE user SET age = ?, updated=? Where id = ? + +affected, err := engine.ID(1).AllCols().Update(&user) +// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ? +``` + +* `Delete` deletes one or more records; Delete MUST have a condition + +```Go +affected, err := engine.Where(...).Delete(&user) +// DELETE FROM user Where ... + +affected, err := engine.ID(2).Delete(&user) +// DELETE FROM user Where id = ? +``` + +* `Count` counts records + +```Go +counts, err := engine.Count(&user) +// SELECT count(*) AS total FROM user +``` + +* `FindAndCount` combines `Find` with `Count`, which is useful for paged queries + +```Go +var users []User +counts, err := engine.FindAndCount(&users) +``` + +* `Sum` and its variants compute sums + +```Go +agesFloat64, err := engine.Sum(&user, "age") +// SELECT sum(age) AS total FROM user + +agesInt64, err := engine.SumInt(&user, "age") +// SELECT sum(age) AS total FROM user + +sumFloat64Slice, err := engine.Sums(&user, "age", "score") +// SELECT sum(age), sum(score) FROM user + +sumInt64Slice, err := engine.SumsInt(&user, "age", "score") +// SELECT sum(age), sum(score) FROM user +``` + +* Query condition builder + +```Go +err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users) +// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?) +``` + +* Multiple operations in one goroutine, with no transaction here but reusing session memory + +```Go +session := engine.NewSession() +defer session.Close() + +user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} +if _, err := session.Insert(&user1); err != nil { + return err +} + +user2 := Userinfo{Username: "yyy"} +if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return err +} + +if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return err +} + +return nil +``` + +* Transactions should stay on one goroutine. This uses a transaction and reuses session memory + +```Go +session := engine.NewSession() +defer session.Close() + +// add Begin() before any action +if err := session.Begin(); err != nil { + // if returned then will rollback automatically + return err +} + +user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} +if _, err := session.Insert(&user1); err != nil { + return err +} + +user2 := Userinfo{Username: "yyy"} +if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return err +} + +if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return err +} + +// add Commit() after all actions +return session.Commit() +``` + +* Or you can use `Transaction` to replace the code above. + +```Go +res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { + user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} + if _, err := session.Insert(&user1); err != nil { + return nil, err + } + + user2 := Userinfo{Username: "yyy"} + if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return nil, err + } + + if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return nil, err + } + return nil, nil +}) +``` + +* Context cache: if enabled, the current query result is cached on the session and reused by the next identical statement on the same session. + +```Go + sess := engine.NewSession() + defer sess.Close() + + var context = xorm.NewMemoryContextCache() + + var c2 ContextGetStruct + has, err := sess.ID(1).ContextCache(context).Get(&c2) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, 1, c2.Id) + assert.EqualValues(t, "1", c2.Name) + sql, args := sess.LastSQL() + assert.True(t, len(sql) > 0) + assert.True(t, len(args) > 0) + + var c3 ContextGetStruct + has, err = sess.ID(1).ContextCache(context).Get(&c3) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, 1, c3.Id) + assert.EqualValues(t, "1", c3.Name) + sql, args = sess.LastSQL() + assert.True(t, len(sql) == 0) + assert.True(t, len(args) == 0) +``` + +## Contributing + +If you want to open a pull request, please see [CONTRIBUTING](https://gitea.com/xorm/xorm/src/branch/master/CONTRIBUTING.md). You can also go to [Xorm on discourse](https://xorm.discourse.group) to discuss. + +## Credits + +### Contributors + +This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)]. + + +### Backers + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/xorm#backer)] + + + +### Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. 
[[Become a sponsor](https://opencollective.com/xorm#sponsor)] + +## Changelog + +You can find all the changelog [here](CHANGELOG.md) + +## Cases + +* [studygolang](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) + +* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) + +* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) + +* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) + +* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) + +* [Wego](http://github.com/go-tango/wego) + +* [Docker.cn](https://docker.cn/) + +* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter) + +* [Gorevel](http://gorevel.cn/) - [github.com/goofcc/gorevel](http://github.com/goofcc/gorevel) + +* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker) + +* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild) + +* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress) + +* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily) + +* [YouGam](http://www.yougam.com/) + +* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS) + +* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/) + +* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog) + +## LICENSE + +BSD License [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/xorm.io/xorm/README_CN.md b/vendor/xorm.io/xorm/README_CN.md new file mode 100644 index 0000000000..80245dd33f --- /dev/null +++ b/vendor/xorm.io/xorm/README_CN.md @@ -0,0 +1,474 @@ +# xorm + +[English](https://gitea.com/xorm/xorm/src/branch/master/README.md) + +xorm 是一个简单而强大的Go语言ORM库. 通过它可以使数据库操作非常简便。 + +[![Build Status](https://drone.gitea.com/api/badges/xorm/xorm/status.svg)](https://drone.gitea.com/xorm/xorm) [![](http://gocover.io/_badge/xorm.io/xorm)](https://gocover.io/xorm.io/xorm) [![](https://goreportcard.com/badge/xorm.io/xorm)](https://goreportcard.com/report/xorm.io/xorm) [![Join the chat at https://img.shields.io/discord/323460943201959939.svg](https://img.shields.io/discord/323460943201959939.svg)](https://discord.gg/HuR2CF3) + +## Notice + +v1.0.0 相对于 v0.8.2 有以下不兼容的变更: + +- 移除了部分不符合Go语言命名的函数,如 `Id`, `Sql`,请使用 `ID`, `SQL` 替代。 +- 删除了对 `xorm.io/core` 的依赖。大部分代码迁移到了 `xorm.io/xorm/core`, `xorm.io/xorm/names`, `xorm.io/xorm/schemas` 等等几个包中. +- 重命名了几个结构体,如: `core.IMapper` -> `names.Mapper`, `core.ILogger` -> `log.Logger`. 
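As a quick illustration of the v1.0.0 renames listed in this notice, here is a hypothetical before/after migration sketch (the struct, driver, and queries are assumptions for the example; the old v0.8.x calls appear only as comments since they no longer compile):

```Go
package main

import (
	"log"

	_ "github.com/mattn/go-sqlite3" // driver assumed for this sketch
	"xorm.io/xorm"
	"xorm.io/xorm/names"
)

type User struct {
	ID   int64
	Name string
}

func main() {
	engine, err := xorm.NewEngine("sqlite3", "./test.db")
	if err != nil {
		log.Fatal(err)
	}
	defer engine.Close()

	// v0.8.x: engine.SetMapper(core.GonicMapper{}); core.IMapper is now names.Mapper
	engine.SetMapper(names.GonicMapper{})

	if err := engine.Sync2(new(User)); err != nil {
		log.Fatal(err)
	}

	var u User
	// v0.8.x: engine.Id(1).Get(&u)
	if _, err := engine.ID(1).Get(&u); err != nil { // Id -> ID
		log.Fatal(err)
	}
	// v0.8.x: engine.Sql("select * from user where id = ?", 1).Get(&u)
	if _, err := engine.SQL("select * from user where id = ?", 1).Get(&u); err != nil { // Sql -> SQL
		log.Fatal(err)
	}
}
```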
+ +## 特性 + +* 支持 Struct 和数据库表之间的灵活映射,并支持自动同步 +* 事务支持 +* 同时支持原始SQL语句和ORM操作的混合执行 +* 使用连写来简化调用 +* 支持使用ID, In, Where, Limit, Join, Having, Table, SQL, Cols等函数和结构体等方式作为条件 +* 支持级联加载Struct +* Schema支持(仅Postgres) +* 支持缓存 +* 通过 [xorm.io/reverse](https://xorm.io/reverse) 支持根据数据库自动生成 xorm 结构体 +* 支持记录版本(即乐观锁) +* 通过 [xorm.io/builder](https://xorm.io/builder) 内置 SQL Builder 支持 +* 上下文缓存支持 +* 支持日志上下文 + +## 驱动支持 + +目前支持的Go数据库驱动和对应的数据库如下: + +* [Mysql5.*](https://github.com/mysql/mysql-server/tree/5.7) / [Mysql8.*](https://github.com/mysql/mysql-server) / [Mariadb](https://github.com/MariaDB/server) / [Tidb](https://github.com/pingcap/tidb) + - [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) + - [github.com/ziutek/mymysql/godrv](https://github.com/ziutek/mymysql/godrv) + +* [Postgres](https://github.com/postgres/postgres) / [Cockroach](https://github.com/cockroachdb/cockroach) + - [github.com/lib/pq](https://github.com/lib/pq) + +* [SQLite](https://sqlite.org) + - [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) + +* MsSql + - [github.com/denisenkom/go-mssqldb](https://github.com/denisenkom/go-mssqldb) + +* Oracle + - [github.com/mattn/go-oci8](https://github.com/mattn/go-oci8) (试验性支持) + +## 安装 + + go get xorm.io/xorm + +## 文档 + +* [操作指南](http://xorm.io/docs) + +* [Godoc代码文档](http://pkg.go.dev/xorm.io/xorm) + +# 快速开始 + +* 第一步创建引擎,driverName, dataSourceName和database/sql接口相同 + +```Go +engine, err := xorm.NewEngine(driverName, dataSourceName) +``` + +* 定义一个和表同步的结构体,并且自动同步结构体到数据库 + +```Go +type User struct { + Id int64 + Name string + Salt string + Age int + Passwd string `xorm:"varchar(200)"` + Created time.Time `xorm:"created"` + Updated time.Time `xorm:"updated"` +} + +err := engine.Sync2(new(User)) +``` + +* 创建Engine组 + +```Go +dataSourceNameSlice := []string{masterDataSourceName, slave1DataSourceName, slave2DataSourceName} +engineGroup, err := xorm.NewEngineGroup(driverName, dataSourceNameSlice) +``` + +```Go +masterEngine, err := xorm.NewEngine(driverName, masterDataSourceName) +slave1Engine, err := xorm.NewEngine(driverName, slave1DataSourceName) +slave2Engine, err := xorm.NewEngine(driverName, slave2DataSourceName) +engineGroup, err := xorm.NewEngineGroup(masterEngine, []*Engine{slave1Engine, slave2Engine}) +``` + +所有使用 `engine` 都可以简单的用 `engineGroup` 来替换。 + +* `Query` 最原始的也支持SQL语句查询,返回的结果类型为 []map[string][]byte。`QueryString` 返回 []map[string]string, `QueryInterface` 返回 `[]map[string]interface{}`. + +```Go +results, err := engine.Query("select * from user") +results, err := engine.Where("a = 1").Query() + +results, err := engine.QueryString("select * from user") +results, err := engine.Where("a = 1").QueryString() + +results, err := engine.QueryInterface("select * from user") +results, err := engine.Where("a = 1").QueryInterface() +``` + +* `Exec` 执行一个SQL语句 + +```Go +affected, err := engine.Exec("update user set age = ? 
where name = ?", age, name) +``` + +* `Insert` 插入一条或者多条记录 + +```Go +affected, err := engine.Insert(&user) +// INSERT INTO struct () values () + +affected, err := engine.Insert(&user1, &user2) +// INSERT INTO struct1 () values () +// INSERT INTO struct2 () values () + +affected, err := engine.Insert(&users) +// INSERT INTO struct () values (),(),() + +affected, err := engine.Insert(&user1, &users) +// INSERT INTO struct1 () values () +// INSERT INTO struct2 () values (),(),() +``` + +* `Get` 查询单条记录 + +```Go +has, err := engine.Get(&user) +// SELECT * FROM user LIMIT 1 + +has, err := engine.Where("name = ?", name).Desc("id").Get(&user) +// SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 + +var name string +has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) +// SELECT name FROM user WHERE id = ? + +var id int64 +has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) +has, err := engine.SQL("select id from user").Get(&id) +// SELECT id FROM user WHERE name = ? + +var valuesMap = make(map[string]string) +has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) +// SELECT * FROM user WHERE id = ? + +var valuesSlice = make([]interface{}, len(cols)) +has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) +// SELECT col1, col2, col3 FROM user WHERE id = ? +``` + +* `Exist` 检测记录是否存在 + +```Go +has, err := testEngine.Exist(new(RecordExist)) +// SELECT * FROM record_exist LIMIT 1 + +has, err = testEngine.Exist(&RecordExist{ + Name: "test1", + }) +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 + +has, err = testEngine.Where("name = ?", "test1").Exist(&RecordExist{}) +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 + +has, err = testEngine.SQL("select * from record_exist where name = ?", "test1").Exist() +// select * from record_exist where name = ? + +has, err = testEngine.Table("record_exist").Exist() +// SELECT * FROM record_exist LIMIT 1 + +has, err = testEngine.Table("record_exist").Where("name = ?", "test1").Exist() +// SELECT * FROM record_exist WHERE name = ? LIMIT 1 +``` + +* `Find` 查询多条记录,当然可以使用Join和extends来组合使用 + +```Go +var users []User +err := engine.Where("name = ?", name).And("age > 10").Limit(10, 0).Find(&users) +// SELECT * FROM user WHERE name = ? AND age > 10 limit 10 offset 0 + +type Detail struct { + Id int64 + UserId int64 `xorm:"index"` +} + +type UserDetail struct { + User `xorm:"extends"` + Detail `xorm:"extends"` +} + +var users []UserDetail +err := engine.Table("user").Select("user.*, detail.*") + Join("INNER", "detail", "detail.user_id = user.id"). + Where("user.name = ?", name).Limit(10, 0). + Find(&users) +// SELECT user.*, detail.* FROM user INNER JOIN detail WHERE user.name = ? limit 10 offset 0 +``` + +* `Iterate` 和 `Rows` 根据条件遍历数据库,可以有两种方式: Iterate and Rows + +```Go +err := engine.Iterate(&User{Name:name}, func(idx int, bean interface{}) error { + user := bean.(*User) + return nil +}) +// SELECT * FROM user + +err := engine.BufferSize(100).Iterate(&User{Name:name}, func(idx int, bean interface{}) error { + user := bean.(*User) + return nil +}) +// SELECT * FROM user Limit 0, 100 +// SELECT * FROM user Limit 101, 100 + +rows, err := engine.Rows(&User{Name:name}) +// SELECT * FROM user +defer rows.Close() +bean := new(Struct) +for rows.Next() { + err = rows.Scan(bean) +} +``` + +* `Update` 更新数据,除非使用Cols,AllCols函数指明,默认只更新非空和非0的字段 + +```Go +affected, err := engine.ID(1).Update(&user) +// UPDATE user SET ... Where id = ? 
+ +affected, err := engine.Update(&user, &User{Name:name}) +// UPDATE user SET ... Where name = ? + +var ids = []int64{1, 2, 3} +affected, err := engine.In(ids).Update(&user) +// UPDATE user SET ... Where id IN (?, ?, ?) + +// force update indicated columns by Cols +affected, err := engine.ID(1).Cols("age").Update(&User{Name:name, Age: 12}) +// UPDATE user SET age = ?, updated=? Where id = ? + +// force NOT update indicated columns by Omit +affected, err := engine.ID(1).Omit("name").Update(&User{Name:name, Age: 12}) +// UPDATE user SET age = ?, updated=? Where id = ? + +affected, err := engine.ID(1).AllCols().Update(&user) +// UPDATE user SET name=?,age=?,salt=?,passwd=?,updated=? Where id = ? +``` + +* `Delete` 删除记录,需要注意,删除必须至少有一个条件,否则会报错。要清空数据库可以用EmptyTable + +```Go +affected, err := engine.Where(...).Delete(&user) +// DELETE FROM user Where ... + +affected, err := engine.ID(2).Delete(&user) +// DELETE FROM user Where id = ? +``` + +* `Count` 获取记录条数 + +```Go +counts, err := engine.Count(&user) +// SELECT count(*) AS total FROM user +``` + +* `Sum` 求和函数 + +```Go +agesFloat64, err := engine.Sum(&user, "age") +// SELECT sum(age) AS total FROM user + +agesInt64, err := engine.SumInt(&user, "age") +// SELECT sum(age) AS total FROM user + +sumFloat64Slice, err := engine.Sums(&user, "age", "score") +// SELECT sum(age), sum(score) FROM user + +sumInt64Slice, err := engine.SumsInt(&user, "age", "score") +// SELECT sum(age), sum(score) FROM user +``` + +* 条件编辑器 + +```Go +err := engine.Where(builder.NotIn("a", 1, 2).And(builder.In("b", "c", "d", "e"))).Find(&users) +// SELECT id, name ... FROM user WHERE a NOT IN (?, ?) AND b IN (?, ?, ?) +``` + +* 在一个Go程中多次操作数据库,但没有事务 + +```Go +session := engine.NewSession() +defer session.Close() + +user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} +if _, err := session.Insert(&user1); err != nil { + return err +} + +user2 := Userinfo{Username: "yyy"} +if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return err +} + +if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return err +} + +return nil +``` + +* 在一个Go程中有事务 + +```Go +session := engine.NewSession() +defer session.Close() + +// add Begin() before any action +if err := session.Begin(); err != nil { + // if returned then will rollback automatically + return err +} + +user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} +if _, err := session.Insert(&user1); err != nil { + return err +} + +user2 := Userinfo{Username: "yyy"} +if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return err +} + +if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return err +} + +// add Commit() after all actions +return session.Commit() +``` + +* 事务的简写方法 + +```Go +res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { + user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} + if _, err := session.Insert(&user1); err != nil { + return nil, err + } + + user2 := Userinfo{Username: "yyy"} + if _, err := session.Where("id = ?", 2).Update(&user2); err != nil { + return nil, err + } + + if _, err := session.Exec("delete from userinfo where username = ?", user2.Username); err != nil { + return nil, err + } + return nil, nil +}) +``` + +* 上下文缓存,如果启用,那么针对单个对象的查询将会被缓存到系统中,可以被下一个查询使用。 + +```Go + sess := engine.NewSession() + defer sess.Close() + + 
var context = xorm.NewMemoryContextCache() + + var c2 ContextGetStruct + has, err := sess.ID(1).ContextCache(context).Get(&c2) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, 1, c2.Id) + assert.EqualValues(t, "1", c2.Name) + sql, args := sess.LastSQL() + assert.True(t, len(sql) > 0) + assert.True(t, len(args) > 0) + + var c3 ContextGetStruct + has, err = sess.ID(1).ContextCache(context).Get(&c3) + assert.NoError(t, err) + assert.True(t, has) + assert.EqualValues(t, 1, c3.Id) + assert.EqualValues(t, "1", c3.Name) + sql, args = sess.LastSQL() + assert.True(t, len(sql) == 0) + assert.True(t, len(args) == 0) +``` + +## 贡献 + +如果您也想为Xorm贡献您的力量,请查看 [CONTRIBUTING](https://gitea.com/xorm/xorm/src/branch/master/CONTRIBUTING.md)。您也可以加入QQ群 技术帮助和讨论。 +群一:280360085 (已满) +群二:795010183 + +## Credits + +### Contributors + +感谢所有的贡献者. [[Contribute](CONTRIBUTING.md)]. + + +### Backers + +感谢我们所有的 backers! 🙏 [[成为 backer](https://opencollective.com/xorm#backer)] + + + +### Sponsors + +成为 sponsor 来支持 xorm。您的 logo 将会被显示并被链接到您的网站。 [[成为 sponsor](https://opencollective.com/xorm#sponsor)] + +# 案例 + +* [Gitea](http://gitea.io) - [github.com/go-gitea/gitea](http://github.com/go-gitea/gitea) + +* [Gogs](http://try.gogits.org) - [github.com/gogits/gogs](http://github.com/gogits/gogs) + +* [grafana](https://grafana.com/) - [github.com/grafana/grafana](http://github.com/grafana/grafana) + +* [Go语言中文网](http://studygolang.com/) - [github.com/studygolang/studygolang](https://github.com/studygolang/studygolang) + +* [github.com/m3ng9i/qreader](https://github.com/m3ng9i/qreader) + +* [Wego](http://github.com/go-tango/wego) + +* [Docker.cn](https://docker.cn/) + +* [Xorm Adapter](https://github.com/casbin/xorm-adapter) for [Casbin](https://github.com/casbin/casbin) - [github.com/casbin/xorm-adapter](https://github.com/casbin/xorm-adapter) + +* [Gowalker](http://gowalker.org) - [github.com/Unknwon/gowalker](http://github.com/Unknwon/gowalker) + +* [Gobuild.io](http://gobuild.io) - [github.com/shxsun/gobuild](http://github.com/shxsun/gobuild) + +* [Sudo China](http://sudochina.com) - [github.com/insionng/toropress](http://github.com/insionng/toropress) + +* [Godaily](http://godaily.org) - [github.com/govc/godaily](http://github.com/govc/godaily) + +* [YouGam](http://www.yougam.com/) + +* [GoCMS - github.com/zzboy/GoCMS](https://github.com/zzdboy/GoCMS) + +* [GoBBS - gobbs.domolo.com](http://gobbs.domolo.com/) + +* [go-blog](http://wangcheng.me) - [github.com/easykoo/go-blog](https://github.com/easykoo/go-blog) + + +## 更新日志 + +请访问 [CHANGELOG.md](CHANGELOG.md) 获得更新日志。 + +## LICENSE + +BSD License +[http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/xorm.io/xorm/caches/cache.go b/vendor/xorm.io/xorm/caches/cache.go new file mode 100644 index 0000000000..7b80eb88d5 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/cache.go @@ -0,0 +1,99 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package caches + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "strings" + "time" + + "xorm.io/xorm/schemas" +) + +const ( + // CacheExpired is default cache expired time + CacheExpired = 60 * time.Minute + // CacheMaxMemory is not use now + CacheMaxMemory = 256 + // CacheGcInterval represents interval time to clear all expired nodes + CacheGcInterval = 10 * time.Minute + // CacheGcMaxRemoved represents max nodes removed when gc + CacheGcMaxRemoved = 20 +) + +// list all the errors +var ( + ErrCacheMiss = errors.New("xorm/cache: key not found") + ErrNotStored = errors.New("xorm/cache: not stored") + // ErrNotExist record does not exist error + ErrNotExist = errors.New("Record does not exist") +) + +// CacheStore is a interface to store cache +type CacheStore interface { + // key is primary key or composite primary key + // value is struct's pointer + // key format : -p--... + Put(key string, value interface{}) error + Get(key string) (interface{}, error) + Del(key string) error +} + +// Cacher is an interface to provide cache +// id format : u--... +type Cacher interface { + GetIds(tableName, sql string) interface{} + GetBean(tableName string, id string) interface{} + PutIds(tableName, sql string, ids interface{}) + PutBean(tableName string, id string, obj interface{}) + DelIds(tableName, sql string) + DelBean(tableName string, id string) + ClearIds(tableName string) + ClearBeans(tableName string) +} + +func encodeIds(ids []schemas.PK) (string, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(ids) + + return buf.String(), err +} + +func decodeIds(s string) ([]schemas.PK, error) { + pks := make([]schemas.PK, 0) + + dec := gob.NewDecoder(strings.NewReader(s)) + err := dec.Decode(&pks) + + return pks, err +} + +// GetCacheSql returns cacher PKs via SQL +func GetCacheSql(m Cacher, tableName, sql string, args interface{}) ([]schemas.PK, error) { + bytes := m.GetIds(tableName, GenSqlKey(sql, args)) + if bytes == nil { + return nil, errors.New("Not Exist") + } + return decodeIds(bytes.(string)) +} + +// PutCacheSql puts cacher SQL and PKs +func PutCacheSql(m Cacher, ids []schemas.PK, tableName, sql string, args interface{}) error { + bytes, err := encodeIds(ids) + if err != nil { + return err + } + m.PutIds(tableName, GenSqlKey(sql, args), bytes) + return nil +} + +// GenSqlKey generates cache key +func GenSqlKey(sql string, args interface{}) string { + return fmt.Sprintf("%v-%v", sql, args) +} diff --git a/vendor/xorm.io/xorm/caches/encode.go b/vendor/xorm.io/xorm/caches/encode.go new file mode 100644 index 0000000000..4ba39924a5 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/encode.go @@ -0,0 +1,58 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
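+//
+// Usage sketch for the JSON variants below (assumes a plain User struct;
+// Encode/Decode themselves default to the gob implementations):
+//
+//	data, err := JsonEncode(User{Name: "lunny"})
+//	if err == nil {
+//		var out User
+//		err = JsonDecode(data, &out)
+//	}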
+ +package caches + +import ( + "bytes" + "crypto/md5" + "encoding/gob" + "encoding/json" + "fmt" + "io" +) + +// md5 hash string +func Md5(str string) string { + m := md5.New() + io.WriteString(m, str) + return fmt.Sprintf("%x", m.Sum(nil)) +} +func Encode(data interface{}) ([]byte, error) { + //return JsonEncode(data) + return GobEncode(data) +} + +func Decode(data []byte, to interface{}) error { + //return JsonDecode(data, to) + return GobDecode(data, to) +} + +func GobEncode(data interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(&data) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func GobDecode(data []byte, to interface{}) error { + buf := bytes.NewBuffer(data) + dec := gob.NewDecoder(buf) + return dec.Decode(to) +} + +func JsonEncode(data interface{}) ([]byte, error) { + val, err := json.Marshal(data) + if err != nil { + return nil, err + } + return val, nil +} + +func JsonDecode(data []byte, to interface{}) error { + return json.Unmarshal(data, to) +} diff --git a/vendor/xorm.io/xorm/caches/leveldb.go b/vendor/xorm.io/xorm/caches/leveldb.go new file mode 100644 index 0000000000..d1a177ad05 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/leveldb.go @@ -0,0 +1,94 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package caches + +import ( + "log" + + "github.com/syndtr/goleveldb/leveldb" +) + +// LevelDBStore implements CacheStore provide local machine +type LevelDBStore struct { + store *leveldb.DB + Debug bool + v interface{} +} + +var _ CacheStore = &LevelDBStore{} + +func NewLevelDBStore(dbfile string) (*LevelDBStore, error) { + db := &LevelDBStore{} + h, err := leveldb.OpenFile(dbfile, nil) + if err != nil { + return nil, err + } + db.store = h + return db, nil +} + +func (s *LevelDBStore) Put(key string, value interface{}) error { + val, err := Encode(value) + if err != nil { + if s.Debug { + log.Println("[LevelDB]EncodeErr: ", err, "Key:", key) + } + return err + } + err = s.store.Put([]byte(key), val, nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]PutErr: ", err, "Key:", key) + } + return err + } + if s.Debug { + log.Println("[LevelDB]Put: ", key) + } + return err +} + +func (s *LevelDBStore) Get(key string) (interface{}, error) { + data, err := s.store.Get([]byte(key), nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]GetErr: ", err, "Key:", key) + } + if err == leveldb.ErrNotFound { + return nil, ErrNotExist + } + return nil, err + } + + err = Decode(data, &s.v) + if err != nil { + if s.Debug { + log.Println("[LevelDB]DecodeErr: ", err, "Key:", key) + } + return nil, err + } + if s.Debug { + log.Println("[LevelDB]Get: ", key, s.v) + } + return s.v, err +} + +func (s *LevelDBStore) Del(key string) error { + err := s.store.Delete([]byte(key), nil) + if err != nil { + if s.Debug { + log.Println("[LevelDB]DelErr: ", err, "Key:", key) + } + return err + } + if s.Debug { + log.Println("[LevelDB]Del: ", key) + } + return err +} + +func (s *LevelDBStore) Close() { + s.store.Close() +} diff --git a/vendor/xorm.io/xorm/caches/lru.go b/vendor/xorm.io/xorm/caches/lru.go new file mode 100644 index 0000000000..6b45ac944d --- /dev/null +++ b/vendor/xorm.io/xorm/caches/lru.go @@ -0,0 +1,282 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
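+//
+// Usage sketch (assumes an xorm.Engine value named engine; SetDefaultCacher
+// is the usual way to enable a cacher engine-wide):
+//
+//	cacher := NewLRUCacher(NewMemoryStore(), 1000) // keep at most 1000 entries
+//	engine.SetDefaultCacher(cacher)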
+ +package caches + +import ( + "container/list" + "fmt" + "sync" + "time" +) + +// LRUCacher implments cache object facilities +type LRUCacher struct { + idList *list.List + sqlList *list.List + idIndex map[string]map[string]*list.Element + sqlIndex map[string]map[string]*list.Element + store CacheStore + mutex sync.Mutex + MaxElementSize int + Expired time.Duration + GcInterval time.Duration +} + +// NewLRUCacher creates a cacher +func NewLRUCacher(store CacheStore, maxElementSize int) *LRUCacher { + return NewLRUCacher2(store, 3600*time.Second, maxElementSize) +} + +// NewLRUCacher2 creates a cache include different params +func NewLRUCacher2(store CacheStore, expired time.Duration, maxElementSize int) *LRUCacher { + cacher := &LRUCacher{store: store, idList: list.New(), + sqlList: list.New(), Expired: expired, + GcInterval: CacheGcInterval, MaxElementSize: maxElementSize, + sqlIndex: make(map[string]map[string]*list.Element), + idIndex: make(map[string]map[string]*list.Element), + } + cacher.RunGC() + return cacher +} + +// RunGC run once every m.GcInterval +func (m *LRUCacher) RunGC() { + time.AfterFunc(m.GcInterval, func() { + m.RunGC() + m.GC() + }) +} + +// GC check ids lit and sql list to remove all element expired +func (m *LRUCacher) GC() { + m.mutex.Lock() + defer m.mutex.Unlock() + var removedNum int + for e := m.idList.Front(); e != nil; { + if removedNum <= CacheGcMaxRemoved && + time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired { + removedNum++ + next := e.Next() + node := e.Value.(*idNode) + m.delBean(node.tbName, node.id) + e = next + } else { + break + } + } + + removedNum = 0 + for e := m.sqlList.Front(); e != nil; { + if removedNum <= CacheGcMaxRemoved && + time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired { + removedNum++ + next := e.Next() + node := e.Value.(*sqlNode) + m.delIds(node.tbName, node.sql) + e = next + } else { + break + } + } +} + +// GetIds returns all bean's ids according to sql and parameter from cache +func (m *LRUCacher) GetIds(tableName, sql string) interface{} { + m.mutex.Lock() + defer m.mutex.Unlock() + if _, ok := m.sqlIndex[tableName]; !ok { + m.sqlIndex[tableName] = make(map[string]*list.Element) + } + if v, err := m.store.Get(sql); err == nil { + if el, ok := m.sqlIndex[tableName][sql]; !ok { + el = m.sqlList.PushBack(newSQLNode(tableName, sql)) + m.sqlIndex[tableName][sql] = el + } else { + lastTime := el.Value.(*sqlNode).lastVisit + // if expired, remove the node and return nil + if time.Now().Sub(lastTime) > m.Expired { + m.delIds(tableName, sql) + return nil + } + m.sqlList.MoveToBack(el) + el.Value.(*sqlNode).lastVisit = time.Now() + } + return v + } + + m.delIds(tableName, sql) + return nil +} + +// GetBean returns bean according tableName and id from cache +func (m *LRUCacher) GetBean(tableName string, id string) interface{} { + m.mutex.Lock() + defer m.mutex.Unlock() + if _, ok := m.idIndex[tableName]; !ok { + m.idIndex[tableName] = make(map[string]*list.Element) + } + tid := genID(tableName, id) + if v, err := m.store.Get(tid); err == nil { + if el, ok := m.idIndex[tableName][id]; ok { + lastTime := el.Value.(*idNode).lastVisit + // if expired, remove the node and return nil + if time.Now().Sub(lastTime) > m.Expired { + m.delBean(tableName, id) + return nil + } + m.idList.MoveToBack(el) + el.Value.(*idNode).lastVisit = time.Now() + } else { + el = m.idList.PushBack(newIDNode(tableName, id)) + m.idIndex[tableName][id] = el + } + return v + } + + // store bean is not exist, then remove memory's index + 
m.delBean(tableName, id) + return nil +} + +// clearIds clears all sql-ids mapping on table tableName from cache +func (m *LRUCacher) clearIds(tableName string) { + if tis, ok := m.sqlIndex[tableName]; ok { + for sql, v := range tis { + m.sqlList.Remove(v) + m.store.Del(sql) + } + } + m.sqlIndex[tableName] = make(map[string]*list.Element) +} + +// ClearIds clears all sql-ids mapping on table tableName from cache +func (m *LRUCacher) ClearIds(tableName string) { + m.mutex.Lock() + m.clearIds(tableName) + m.mutex.Unlock() +} + +func (m *LRUCacher) clearBeans(tableName string) { + if tis, ok := m.idIndex[tableName]; ok { + for id, v := range tis { + m.idList.Remove(v) + tid := genID(tableName, id) + m.store.Del(tid) + } + } + m.idIndex[tableName] = make(map[string]*list.Element) +} + +// ClearBeans clears all beans in some table +func (m *LRUCacher) ClearBeans(tableName string) { + m.mutex.Lock() + m.clearBeans(tableName) + m.mutex.Unlock() +} + +// PutIds pus ids into table +func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) { + m.mutex.Lock() + if _, ok := m.sqlIndex[tableName]; !ok { + m.sqlIndex[tableName] = make(map[string]*list.Element) + } + if el, ok := m.sqlIndex[tableName][sql]; !ok { + el = m.sqlList.PushBack(newSQLNode(tableName, sql)) + m.sqlIndex[tableName][sql] = el + } else { + el.Value.(*sqlNode).lastVisit = time.Now() + } + m.store.Put(sql, ids) + if m.sqlList.Len() > m.MaxElementSize { + e := m.sqlList.Front() + node := e.Value.(*sqlNode) + m.delIds(node.tbName, node.sql) + } + m.mutex.Unlock() +} + +// PutBean puts beans into table +func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) { + m.mutex.Lock() + var el *list.Element + var ok bool + + if el, ok = m.idIndex[tableName][id]; !ok { + el = m.idList.PushBack(newIDNode(tableName, id)) + m.idIndex[tableName][id] = el + } else { + el.Value.(*idNode).lastVisit = time.Now() + } + + m.store.Put(genID(tableName, id), obj) + if m.idList.Len() > m.MaxElementSize { + e := m.idList.Front() + node := e.Value.(*idNode) + m.delBean(node.tbName, node.id) + } + m.mutex.Unlock() +} + +func (m *LRUCacher) delIds(tableName, sql string) { + if _, ok := m.sqlIndex[tableName]; ok { + if el, ok := m.sqlIndex[tableName][sql]; ok { + delete(m.sqlIndex[tableName], sql) + m.sqlList.Remove(el) + } + } + m.store.Del(sql) +} + +// DelIds deletes ids +func (m *LRUCacher) DelIds(tableName, sql string) { + m.mutex.Lock() + m.delIds(tableName, sql) + m.mutex.Unlock() +} + +func (m *LRUCacher) delBean(tableName string, id string) { + tid := genID(tableName, id) + if el, ok := m.idIndex[tableName][id]; ok { + delete(m.idIndex[tableName], id) + m.idList.Remove(el) + m.clearIds(tableName) + } + m.store.Del(tid) +} + +// DelBean deletes beans in some table +func (m *LRUCacher) DelBean(tableName string, id string) { + m.mutex.Lock() + m.delBean(tableName, id) + m.mutex.Unlock() +} + +type idNode struct { + tbName string + id string + lastVisit time.Time +} + +type sqlNode struct { + tbName string + sql string + lastVisit time.Time +} + +func genSQLKey(sql string, args interface{}) string { + return fmt.Sprintf("%s-%v", sql, args) +} + +func genID(prefix string, id string) string { + return fmt.Sprintf("%s-%s", prefix, id) +} + +func newIDNode(tbName string, id string) *idNode { + return &idNode{tbName, id, time.Now()} +} + +func newSQLNode(tbName, sql string) *sqlNode { + return &sqlNode{tbName, sql, time.Now()} +} diff --git a/vendor/xorm.io/xorm/caches/manager.go b/vendor/xorm.io/xorm/caches/manager.go new file mode 
100644 index 0000000000..05045210d2 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/manager.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package caches + +import "sync" + +type Manager struct { + cacher Cacher + disableGlobalCache bool + + cachers map[string]Cacher + cacherLock sync.RWMutex +} + +func NewManager() *Manager { + return &Manager{ + cachers: make(map[string]Cacher), + } +} + +// SetDisableGlobalCache disable global cache or not +func (mgr *Manager) SetDisableGlobalCache(disable bool) { + if mgr.disableGlobalCache != disable { + mgr.disableGlobalCache = disable + } +} + +func (mgr *Manager) SetCacher(tableName string, cacher Cacher) { + mgr.cacherLock.Lock() + mgr.cachers[tableName] = cacher + mgr.cacherLock.Unlock() +} + +func (mgr *Manager) GetCacher(tableName string) Cacher { + var cacher Cacher + var ok bool + mgr.cacherLock.RLock() + cacher, ok = mgr.cachers[tableName] + mgr.cacherLock.RUnlock() + if !ok && !mgr.disableGlobalCache { + cacher = mgr.cacher + } + return cacher +} + +// SetDefaultCacher set the default cacher. Xorm's default not enable cacher. +func (mgr *Manager) SetDefaultCacher(cacher Cacher) { + mgr.cacher = cacher +} + +// GetDefaultCacher returns the default cacher +func (mgr *Manager) GetDefaultCacher() Cacher { + return mgr.cacher +} diff --git a/vendor/xorm.io/xorm/caches/memory_store.go b/vendor/xorm.io/xorm/caches/memory_store.go new file mode 100644 index 0000000000..f16254d823 --- /dev/null +++ b/vendor/xorm.io/xorm/caches/memory_store.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package caches + +import ( + "sync" +) + +var _ CacheStore = NewMemoryStore() + +// MemoryStore represents in-memory store +type MemoryStore struct { + store map[interface{}]interface{} + mutex sync.RWMutex +} + +// NewMemoryStore creates a new store in memory +func NewMemoryStore() *MemoryStore { + return &MemoryStore{store: make(map[interface{}]interface{})} +} + +// Put puts object into store +func (s *MemoryStore) Put(key string, value interface{}) error { + s.mutex.Lock() + defer s.mutex.Unlock() + s.store[key] = value + return nil +} + +// Get gets object from store +func (s *MemoryStore) Get(key string) (interface{}, error) { + s.mutex.RLock() + defer s.mutex.RUnlock() + if v, ok := s.store[key]; ok { + return v, nil + } + + return nil, ErrNotExist +} + +// Del deletes object +func (s *MemoryStore) Del(key string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + delete(s.store, key) + return nil +} diff --git a/vendor/xorm.io/xorm/contexts/context_cache.go b/vendor/xorm.io/xorm/contexts/context_cache.go new file mode 100644 index 0000000000..0d0f0f02b4 --- /dev/null +++ b/vendor/xorm.io/xorm/contexts/context_cache.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package contexts + +// ContextCache is the interface that operates the cache data. +type ContextCache interface { + // Put puts value into cache with key. + Put(key string, val interface{}) + // Get gets cached value by given key. 
+ Get(key string) interface{} +} + +type memoryContextCache map[string]interface{} + +// NewMemoryContextCache return memoryContextCache +func NewMemoryContextCache() memoryContextCache { + return make(map[string]interface{}) +} + +// Put puts value into cache with key. +func (m memoryContextCache) Put(key string, val interface{}) { + m[key] = val +} + +// Get gets cached value by given key. +func (m memoryContextCache) Get(key string) interface{} { + return m[key] +} diff --git a/vendor/xorm.io/xorm/contexts/hook.go b/vendor/xorm.io/xorm/contexts/hook.go new file mode 100644 index 0000000000..71ad8e8721 --- /dev/null +++ b/vendor/xorm.io/xorm/contexts/hook.go @@ -0,0 +1,75 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package contexts + +import ( + "context" + "database/sql" + "time" +) + +// ContextHook represents a hook context +type ContextHook struct { + start time.Time + Ctx context.Context + SQL string // log content or SQL + Args []interface{} // if it's a SQL, it's the arguments + Result sql.Result + ExecuteTime time.Duration + Err error // SQL executed error +} + +// NewContextHook return context for hook +func NewContextHook(ctx context.Context, sql string, args []interface{}) *ContextHook { + return &ContextHook{ + start: time.Now(), + Ctx: ctx, + SQL: sql, + Args: args, + } +} + +func (c *ContextHook) End(ctx context.Context, result sql.Result, err error) { + c.Ctx = ctx + c.Result = result + c.Err = err + c.ExecuteTime = time.Now().Sub(c.start) +} + +type Hook interface { + BeforeProcess(c *ContextHook) (context.Context, error) + AfterProcess(c *ContextHook) error +} + +type Hooks struct { + hooks []Hook +} + +func (h *Hooks) AddHook(hooks ...Hook) { + h.hooks = append(h.hooks, hooks...) +} + +func (h *Hooks) BeforeProcess(c *ContextHook) (context.Context, error) { + ctx := c.Ctx + for _, h := range h.hooks { + var err error + ctx, err = h.BeforeProcess(c) + if err != nil { + return nil, err + } + } + return ctx, nil +} + +func (h *Hooks) AfterProcess(c *ContextHook) error { + firstErr := c.Err + for _, h := range h.hooks { + err := h.AfterProcess(c) + if err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} diff --git a/vendor/xorm.io/xorm/convert.go b/vendor/xorm.io/xorm/convert.go new file mode 100644 index 0000000000..c19d30e07f --- /dev/null +++ b/vendor/xorm.io/xorm/convert.go @@ -0,0 +1,422 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
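+//
+// convertAssign below follows the database/sql convention: dest must be a
+// non-nil pointer, and common source types are converted without reflection,
+// for example:
+//
+//	var name string
+//	err := convertAssign(&name, []byte("lunny")) // name == "lunny"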
+ +package xorm + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strconv" + "time" +) + +var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error + +func strconvErr(err error) error { + if ne, ok := err.(*strconv.NumError); ok { + return ne.Err + } + return err +} + +func cloneBytes(b []byte) []byte { + if b == nil { + return nil + } + c := make([]byte, len(b)) + copy(c, b) + return c +} + +func asString(src interface{}) string { + switch v := src.(type) { + case string: + return v + case []byte: + return string(v) + } + rv := reflect.ValueOf(src) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(rv.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(rv.Uint(), 10) + case reflect.Float64: + return strconv.FormatFloat(rv.Float(), 'g', -1, 64) + case reflect.Float32: + return strconv.FormatFloat(rv.Float(), 'g', -1, 32) + case reflect.Bool: + return strconv.FormatBool(rv.Bool()) + } + return fmt.Sprintf("%v", src) +} + +func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.AppendInt(buf, rv.Int(), 10), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.AppendUint(buf, rv.Uint(), 10), true + case reflect.Float32: + return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true + case reflect.Float64: + return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true + case reflect.Bool: + return strconv.AppendBool(buf, rv.Bool()), true + case reflect.String: + s := rv.String() + return append(buf, s...), true + } + return +} + +// convertAssign copies to dest the value in src, converting it if possible. +// An error is returned if the copy would result in loss of information. +// dest should be a pointer type. +func convertAssign(dest, src interface{}) error { + // Common cases, without reflect. 
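+	// Fast paths: string, []byte, time.Time and nil sources are handled
+	// directly before falling back to the reflect-based conversions below.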
+ switch s := src.(type) { + case string: + switch d := dest.(type) { + case *string: + if d == nil { + return errNilPtr + } + *d = s + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = []byte(s) + return nil + } + case []byte: + switch d := dest.(type) { + case *string: + if d == nil { + return errNilPtr + } + *d = string(s) + return nil + case *interface{}: + if d == nil { + return errNilPtr + } + *d = cloneBytes(s) + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = cloneBytes(s) + return nil + } + + case time.Time: + switch d := dest.(type) { + case *string: + *d = s.Format(time.RFC3339Nano) + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = []byte(s.Format(time.RFC3339Nano)) + return nil + } + case nil: + switch d := dest.(type) { + case *interface{}: + if d == nil { + return errNilPtr + } + *d = nil + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = nil + return nil + } + } + + var sv reflect.Value + + switch d := dest.(type) { + case *string: + sv = reflect.ValueOf(src) + switch sv.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + *d = asString(src) + return nil + } + case *[]byte: + sv = reflect.ValueOf(src) + if b, ok := asBytes(nil, sv); ok { + *d = b + return nil + } + case *bool: + bv, err := driver.Bool.ConvertValue(src) + if err == nil { + *d = bv.(bool) + } + return err + case *interface{}: + *d = src + return nil + } + + dpv := reflect.ValueOf(dest) + if dpv.Kind() != reflect.Ptr { + return errors.New("destination not a pointer") + } + if dpv.IsNil() { + return errNilPtr + } + + if !sv.IsValid() { + sv = reflect.ValueOf(src) + } + + dv := reflect.Indirect(dpv) + if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) { + switch b := src.(type) { + case []byte: + dv.Set(reflect.ValueOf(cloneBytes(b))) + default: + dv.Set(sv) + } + return nil + } + + if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) { + dv.Set(sv.Convert(dv.Type())) + return nil + } + + switch dv.Kind() { + case reflect.Ptr: + if src == nil { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + + dv.Set(reflect.New(dv.Type().Elem())) + return convertAssign(dv.Interface(), src) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := asString(src) + i64, err := strconv.ParseInt(s, 10, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetInt(i64) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + s := asString(src) + u64, err := strconv.ParseUint(s, 10, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetUint(u64) + return nil + case reflect.Float32, reflect.Float64: + s := asString(src) + f64, err := strconv.ParseFloat(s, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetFloat(f64) + return nil + case reflect.String: + dv.SetString(asString(src)) + return nil + } + + return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest) +} + +func asKind(vv reflect.Value, tp reflect.Type) 
(interface{}, error) { + switch tp.Kind() { + case reflect.Int64: + return vv.Int(), nil + case reflect.Int: + return int(vv.Int()), nil + case reflect.Int32: + return int32(vv.Int()), nil + case reflect.Int16: + return int16(vv.Int()), nil + case reflect.Int8: + return int8(vv.Int()), nil + case reflect.Uint64: + return vv.Uint(), nil + case reflect.Uint: + return uint(vv.Uint()), nil + case reflect.Uint32: + return uint32(vv.Uint()), nil + case reflect.Uint16: + return uint16(vv.Uint()), nil + case reflect.Uint8: + return uint8(vv.Uint()), nil + case reflect.String: + return vv.String(), nil + case reflect.Slice: + if tp.Elem().Kind() == reflect.Uint8 { + v, err := strconv.ParseInt(string(vv.Interface().([]byte)), 10, 64) + if err != nil { + return nil, err + } + return v, nil + } + + } + return nil, fmt.Errorf("unsupported primary key type: %v, %v", tp, vv) +} + +func asBool(bs []byte) (bool, error) { + if len(bs) == 0 { + return false, nil + } + if bs[0] == 0x00 { + return false, nil + } else if bs[0] == 0x01 { + return true, nil + } + return strconv.ParseBool(string(bs)) +} + +// str2PK convert string value to primary key value according to tp +func str2PKValue(s string, tp reflect.Type) (reflect.Value, error) { + var err error + var result interface{} + var defReturn = reflect.Zero(tp) + + switch tp.Kind() { + case reflect.Int: + result, err = strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int: %s", s, err.Error()) + } + case reflect.Int8: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int8: %s", s, err.Error()) + } + result = int8(x) + case reflect.Int16: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int16: %s", s, err.Error()) + } + result = int16(x) + case reflect.Int32: + x, err := strconv.Atoi(s) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int32: %s", s, err.Error()) + } + result = int32(x) + case reflect.Int64: + result, err = strconv.ParseInt(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as int64: %s", s, err.Error()) + } + case reflect.Uint: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint: %s", s, err.Error()) + } + result = uint(x) + case reflect.Uint8: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint8: %s", s, err.Error()) + } + result = uint8(x) + case reflect.Uint16: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint16: %s", s, err.Error()) + } + result = uint16(x) + case reflect.Uint32: + x, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint32: %s", s, err.Error()) + } + result = uint32(x) + case reflect.Uint64: + result, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return defReturn, fmt.Errorf("convert %s as uint64: %s", s, err.Error()) + } + case reflect.String: + result = s + default: + return defReturn, errors.New("unsupported convert type") + } + return reflect.ValueOf(result).Convert(tp), nil +} + +func str2PK(s string, tp reflect.Type) (interface{}, error) { + v, err := str2PKValue(s, tp) + if err != nil { + return nil, err + } + return v.Interface(), nil +} + +func int64ToIntValue(id int64, tp reflect.Type) reflect.Value { + var v interface{} + kind := tp.Kind() + + if kind == reflect.Ptr { + kind = tp.Elem().Kind() + } + + switch kind { + case 
reflect.Int16: + temp := int16(id) + v = &temp + case reflect.Int32: + temp := int32(id) + v = &temp + case reflect.Int: + temp := int(id) + v = &temp + case reflect.Int64: + temp := id + v = &temp + case reflect.Uint16: + temp := uint16(id) + v = &temp + case reflect.Uint32: + temp := uint32(id) + v = &temp + case reflect.Uint64: + temp := uint64(id) + v = &temp + case reflect.Uint: + temp := uint(id) + v = &temp + } + + if tp.Kind() == reflect.Ptr { + return reflect.ValueOf(v).Convert(tp) + } + return reflect.ValueOf(v).Elem().Convert(tp) +} + +func int64ToInt(id int64, tp reflect.Type) interface{} { + return int64ToIntValue(id, tp).Interface() +} diff --git a/vendor/xorm.io/xorm/convert/conversion.go b/vendor/xorm.io/xorm/convert/conversion.go new file mode 100644 index 0000000000..16f1a92a16 --- /dev/null +++ b/vendor/xorm.io/xorm/convert/conversion.go @@ -0,0 +1,12 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package convert + +// Conversion is an interface. A type implements Conversion will according +// the custom method to fill into database and retrieve from database. +type Conversion interface { + FromDB([]byte) error + ToDB() ([]byte, error) +} diff --git a/vendor/xorm.io/xorm/core/db.go b/vendor/xorm.io/xorm/core/db.go new file mode 100644 index 0000000000..50c64c6fa6 --- /dev/null +++ b/vendor/xorm.io/xorm/core/db.go @@ -0,0 +1,293 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "sync" + + "xorm.io/xorm/contexts" + "xorm.io/xorm/log" + "xorm.io/xorm/names" +) + +var ( + // DefaultCacheSize sets the default cache size + DefaultCacheSize = 200 +) + +func MapToSlice(query string, mp interface{}) (string, []interface{}, error) { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return "", []interface{}{}, ErrNoMapPointer + } + + args := make([]interface{}, 0, len(vv.Elem().MapKeys())) + var err error + query = re.ReplaceAllStringFunc(query, func(src string) string { + v := vv.Elem().MapIndex(reflect.ValueOf(src[1:])) + if !v.IsValid() { + err = fmt.Errorf("map key %s is missing", src[1:]) + } else { + args = append(args, v.Interface()) + } + return "?" + }) + + return query, args, err +} + +func StructToSlice(query string, st interface{}) (string, []interface{}, error) { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return "", []interface{}{}, ErrNoStructPointer + } + + args := make([]interface{}, 0) + var err error + query = re.ReplaceAllStringFunc(query, func(src string) string { + fv := vv.Elem().FieldByName(src[1:]).Interface() + if v, ok := fv.(driver.Valuer); ok { + var value driver.Value + value, err = v.Value() + if err != nil { + return "?" + } + args = append(args, value) + } else { + args = append(args, fv) + } + return "?" 
+ }) + if err != nil { + return "", []interface{}{}, err + } + return query, args, nil +} + +type cacheStruct struct { + value reflect.Value + idx int +} + +var ( + _ QueryExecuter = &DB{} +) + +// DB is a wrap of sql.DB with extra contents +type DB struct { + *sql.DB + Mapper names.Mapper + reflectCache map[reflect.Type]*cacheStruct + reflectCacheMutex sync.RWMutex + Logger log.ContextLogger + hooks contexts.Hooks +} + +// Open opens a database +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{ + DB: db, + Mapper: names.NewCacheMapper(&names.SnakeMapper{}), + reflectCache: make(map[reflect.Type]*cacheStruct), + }, nil +} + +// FromDB creates a DB from a sql.DB +func FromDB(db *sql.DB) *DB { + return &DB{ + DB: db, + Mapper: names.NewCacheMapper(&names.SnakeMapper{}), + reflectCache: make(map[reflect.Type]*cacheStruct), + } +} + +// NeedLogSQL returns true if need to log SQL +func (db *DB) NeedLogSQL(ctx context.Context) bool { + if db.Logger == nil { + return false + } + + v := ctx.Value(log.SessionShowSQLKey) + if showSQL, ok := v.(bool); ok { + return showSQL + } + return db.Logger.IsShowSQL() +} + +func (db *DB) reflectNew(typ reflect.Type) reflect.Value { + db.reflectCacheMutex.Lock() + defer db.reflectCacheMutex.Unlock() + cs, ok := db.reflectCache[typ] + if !ok || cs.idx+1 > DefaultCacheSize-1 { + cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0} + db.reflectCache[typ] = cs + } else { + cs.idx = cs.idx + 1 + } + return cs.value.Index(cs.idx).Addr() +} + +// QueryContext overwrites sql.DB.QueryContext +func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + rows, err := db.DB.QueryContext(ctx, query, args...) + hookCtx.End(ctx, nil, err) + if err := db.afterProcess(hookCtx); err != nil { + if rows != nil { + rows.Close() + } + return nil, err + } + return &Rows{rows, db}, nil +} + +// Query overwrites sql.DB.Query +func (db *DB) Query(query string, args ...interface{}) (*Rows, error) { + return db.QueryContext(context.Background(), query, args...) +} + +// QueryMapContext executes query with parameters via map and context +func (db *DB) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return db.QueryContext(ctx, query, args...) +} + +// QueryMap executes query with parameters via map +func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) { + return db.QueryMapContext(context.Background(), query, mp) +} + +func (db *DB) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return db.QueryContext(ctx, query, args...) +} + +func (db *DB) QueryStruct(query string, st interface{}) (*Rows, error) { + return db.QueryStructContext(context.Background(), query, st) +} + +func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.QueryContext(ctx, query, args...) 
+ if err != nil { + return &Row{nil, err} + } + return &Row{rows, nil} +} + +func (db *DB) QueryRow(query string, args ...interface{}) *Row { + return db.QueryRowContext(context.Background(), query, args...) +} + +func (db *DB) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { + query, args, err := MapToSlice(query, mp) + if err != nil { + return &Row{nil, err} + } + return db.QueryRowContext(ctx, query, args...) +} + +func (db *DB) QueryRowMap(query string, mp interface{}) *Row { + return db.QueryRowMapContext(context.Background(), query, mp) +} + +func (db *DB) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { + query, args, err := StructToSlice(query, st) + if err != nil { + return &Row{nil, err} + } + return db.QueryRowContext(ctx, query, args...) +} + +func (db *DB) QueryRowStruct(query string, st interface{}) *Row { + return db.QueryRowStructContext(context.Background(), query, st) +} + +var ( + re = regexp.MustCompile(`[?](\w+)`) +) + +// ExecMapContext exec map with context.ContextHook +// insert into (name) values (?) +// insert into (name) values (?name) +func (db *DB) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return db.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) { + return db.ExecMapContext(context.Background(), query, mp) +} + +func (db *DB) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return db.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + res, err := db.DB.ExecContext(ctx, query, args...) + hookCtx.End(ctx, res, err) + if err := db.afterProcess(hookCtx); err != nil { + return nil, err + } + return res, nil +} + +func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) { + return db.ExecStructContext(context.Background(), query, st) +} + +func (db *DB) beforeProcess(c *contexts.ContextHook) (context.Context, error) { + if db.NeedLogSQL(c.Ctx) { + db.Logger.BeforeSQL(log.LogContext(*c)) + } + ctx, err := db.hooks.BeforeProcess(c) + if err != nil { + return nil, err + } + return ctx, nil +} + +func (db *DB) afterProcess(c *contexts.ContextHook) error { + err := db.hooks.AfterProcess(c) + if db.NeedLogSQL(c.Ctx) { + db.Logger.AfterSQL(log.LogContext(*c)) + } + return err +} + +func (db *DB) AddHook(h ...contexts.Hook) { + db.hooks.AddHook(h...) +} diff --git a/vendor/xorm.io/xorm/core/error.go b/vendor/xorm.io/xorm/core/error.go new file mode 100644 index 0000000000..1fd183483b --- /dev/null +++ b/vendor/xorm.io/xorm/core/error.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
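+//
+// Usage sketch for the named-parameter helpers defined on DB above
+// (MapToSlice insists on a pointer to a map, hence the address-of):
+//
+//	res, err := db.ExecMap(
+//		"INSERT INTO user (name) VALUES (?name)",
+//		&map[string]interface{}{"name": "lunny"},
+//	)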
+ +package core + +import "errors" + +var ( + // ErrNoMapPointer represents error when no map pointer + ErrNoMapPointer = errors.New("mp should be a map's pointer") + // ErrNoStructPointer represents error when no struct pointer + ErrNoStructPointer = errors.New("mp should be a struct's pointer") +) diff --git a/vendor/xorm.io/xorm/core/interface.go b/vendor/xorm.io/xorm/core/interface.go new file mode 100644 index 0000000000..a5c8e4e229 --- /dev/null +++ b/vendor/xorm.io/xorm/core/interface.go @@ -0,0 +1,22 @@ +package core + +import ( + "context" + "database/sql" +) + +// Queryer represents an interface to query a SQL to get data from database +type Queryer interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) +} + +// Executer represents an interface to execute a SQL +type Executer interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// QueryExecuter combines the Queryer and Executer +type QueryExecuter interface { + Queryer + Executer +} diff --git a/vendor/xorm.io/xorm/core/rows.go b/vendor/xorm.io/xorm/core/rows.go new file mode 100644 index 0000000000..a1e8bfbcde --- /dev/null +++ b/vendor/xorm.io/xorm/core/rows.go @@ -0,0 +1,338 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "database/sql" + "errors" + "reflect" + "sync" +) + +type Rows struct { + *sql.Rows + db *DB +} + +func (rs *Rows) ToMapString() ([]map[string]string, error) { + cols, err := rs.Columns() + if err != nil { + return nil, err + } + + var results = make([]map[string]string, 0, 10) + for rs.Next() { + var record = make(map[string]string, len(cols)) + err = rs.ScanMap(&record) + if err != nil { + return nil, err + } + results = append(results, record) + } + return results, nil +} + +// scan data to a struct's pointer according field index +func (rs *Rows) ScanStructByIndex(dest ...interface{}) error { + if len(dest) == 0 { + return errors.New("at least one struct") + } + + vvvs := make([]reflect.Value, len(dest)) + for i, s := range dest { + vv := reflect.ValueOf(s) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return errors.New("dest should be a struct's pointer") + } + + vvvs[i] = vv.Elem() + } + + cols, err := rs.Columns() + if err != nil { + return err + } + newDest := make([]interface{}, len(cols)) + + var i = 0 + for _, vvv := range vvvs { + for j := 0; j < vvv.NumField(); j++ { + newDest[i] = vvv.Field(j).Addr().Interface() + i = i + 1 + } + } + + return rs.Rows.Scan(newDest...) 
+} + +var ( + fieldCache = make(map[reflect.Type]map[string]int) + fieldCacheMutex sync.RWMutex +) + +func fieldByName(v reflect.Value, name string) reflect.Value { + t := v.Type() + fieldCacheMutex.RLock() + cache, ok := fieldCache[t] + fieldCacheMutex.RUnlock() + if !ok { + cache = make(map[string]int) + for i := 0; i < v.NumField(); i++ { + cache[t.Field(i).Name] = i + } + fieldCacheMutex.Lock() + fieldCache[t] = cache + fieldCacheMutex.Unlock() + } + + if i, ok := cache[name]; ok { + return v.Field(i) + } + + return reflect.Zero(t) +} + +// scan data to a struct's pointer according field name +func (rs *Rows) ScanStructByName(dest interface{}) error { + vv := reflect.ValueOf(dest) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return errors.New("dest should be a struct's pointer") + } + + cols, err := rs.Columns() + if err != nil { + return err + } + + newDest := make([]interface{}, len(cols)) + var v EmptyScanner + for j, name := range cols { + f := fieldByName(vv.Elem(), rs.db.Mapper.Table2Obj(name)) + if f.IsValid() { + newDest[j] = f.Addr().Interface() + } else { + newDest[j] = &v + } + } + + return rs.Rows.Scan(newDest...) +} + +// scan data to a slice's pointer, slice's length should equal to columns' number +func (rs *Rows) ScanSlice(dest interface{}) error { + vv := reflect.ValueOf(dest) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Slice { + return errors.New("dest should be a slice's pointer") + } + + vvv := vv.Elem() + cols, err := rs.Columns() + if err != nil { + return err + } + + newDest := make([]interface{}, len(cols)) + + for j := 0; j < len(cols); j++ { + if j >= vvv.Len() { + newDest[j] = reflect.New(vvv.Type().Elem()).Interface() + } else { + newDest[j] = vvv.Index(j).Addr().Interface() + } + } + + err = rs.Rows.Scan(newDest...) + if err != nil { + return err + } + + srcLen := vvv.Len() + for i := srcLen; i < len(cols); i++ { + vvv = reflect.Append(vvv, reflect.ValueOf(newDest[i]).Elem()) + } + return nil +} + +// scan data to a map's pointer +func (rs *Rows) ScanMap(dest interface{}) error { + vv := reflect.ValueOf(dest) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return errors.New("dest should be a map's pointer") + } + + cols, err := rs.Columns() + if err != nil { + return err + } + + newDest := make([]interface{}, len(cols)) + vvv := vv.Elem() + + for i := range cols { + newDest[i] = rs.db.reflectNew(vvv.Type().Elem()).Interface() + } + + err = rs.Rows.Scan(newDest...) + if err != nil { + return err + } + + for i, name := range cols { + vname := reflect.ValueOf(name) + vvv.SetMapIndex(vname, reflect.ValueOf(newDest[i]).Elem()) + } + + return nil +} + +type Row struct { + rows *Rows + // One of these two will be non-nil: + err error // deferred error for easy chaining +} + +// ErrorRow return an error row +func ErrorRow(err error) *Row { + return &Row{ + err: err, + } +} + +// NewRow from rows +func NewRow(rows *Rows, err error) *Row { + return &Row{rows, err} +} + +func (row *Row) Columns() ([]string, error) { + if row.err != nil { + return nil, row.err + } + return row.rows.Columns() +} + +func (row *Row) Scan(dest ...interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.Scan(dest...) 
+ if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +func (row *Row) ScanStructByName(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanStructByName(dest) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +func (row *Row) ScanStructByIndex(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanStructByIndex(dest) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +// scan data to a slice's pointer, slice's length should equal to columns' number +func (row *Row) ScanSlice(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanSlice(dest) + if err != nil { + return err + } + + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +// scan data to a map's pointer +func (row *Row) ScanMap(dest interface{}) error { + if row.err != nil { + return row.err + } + defer row.rows.Close() + + if !row.rows.Next() { + if err := row.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := row.rows.ScanMap(dest) + if err != nil { + return err + } + + // Make sure the query can be processed to completion with no errors. + return row.rows.Close() +} + +func (row *Row) ToMapString() (map[string]string, error) { + cols, err := row.Columns() + if err != nil { + return nil, err + } + + var record = make(map[string]string, len(cols)) + err = row.ScanMap(&record) + if err != nil { + return nil, err + } + + return record, nil +} diff --git a/vendor/xorm.io/xorm/core/scan.go b/vendor/xorm.io/xorm/core/scan.go new file mode 100644 index 0000000000..897b534159 --- /dev/null +++ b/vendor/xorm.io/xorm/core/scan.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "database/sql/driver" + "fmt" + "time" +) + +type NullTime time.Time + +var ( + _ driver.Valuer = NullTime{} +) + +func (ns *NullTime) Scan(value interface{}) error { + if value == nil { + return nil + } + return convertTime(ns, value) +} + +// Value implements the driver Valuer interface. +func (ns NullTime) Value() (driver.Value, error) { + if (time.Time)(ns).IsZero() { + return nil, nil + } + return (time.Time)(ns).Format("2006-01-02 15:04:05"), nil +} + +func convertTime(dest *NullTime, src interface{}) error { + // Common cases, without reflect. 
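+	// Recognized sources: the "2006-01-02 15:04:05" layout as string or
+	// []uint8, time.Time values directly, and nil, which leaves dest as-is.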
+ switch s := src.(type) { + case string: + t, err := time.Parse("2006-01-02 15:04:05", s) + if err != nil { + return err + } + *dest = NullTime(t) + return nil + case []uint8: + t, err := time.Parse("2006-01-02 15:04:05", string(s)) + if err != nil { + return err + } + *dest = NullTime(t) + return nil + case time.Time: + *dest = NullTime(s) + return nil + case nil: + default: + return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest) + } + return nil +} + +type EmptyScanner struct { +} + +func (EmptyScanner) Scan(src interface{}) error { + return nil +} diff --git a/vendor/xorm.io/xorm/core/stmt.go b/vendor/xorm.io/xorm/core/stmt.go new file mode 100644 index 0000000000..d46ac9c678 --- /dev/null +++ b/vendor/xorm.io/xorm/core/stmt.go @@ -0,0 +1,194 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" + "errors" + "reflect" + + "xorm.io/xorm/contexts" +) + +// Stmt reprents a stmt objects +type Stmt struct { + *sql.Stmt + db *DB + names map[string]int + query string +} + +func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) { + names := make(map[string]int) + var i int + query = re.ReplaceAllStringFunc(query, func(src string) string { + names[src[1:]] = i + i++ + return "?" + }) + hookCtx := contexts.NewContextHook(ctx, "PREPARE", nil) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + stmt, err := db.DB.PrepareContext(ctx, query) + hookCtx.End(ctx, nil, err) + if err := db.afterProcess(hookCtx); err != nil { + return nil, err + } + return &Stmt{stmt, db, names, query}, nil +} + +func (db *DB) Prepare(query string) (*Stmt, error) { + return db.PrepareContext(context.Background(), query) +} + +func (s *Stmt) ExecMapContext(ctx context.Context, mp interface{}) (sql.Result, error) { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() + } + return s.ExecContext(ctx, args...) +} + +func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) { + return s.ExecMapContext(context.Background(), mp) +} + +func (s *Stmt) ExecStructContext(ctx context.Context, st interface{}) (sql.Result, error) { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().FieldByName(k).Interface() + } + return s.ExecContext(ctx, args...) 
+} + +func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) { + return s.ExecStructContext(context.Background(), st) +} + +func (s *Stmt) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) { + hookCtx := contexts.NewContextHook(ctx, s.query, args) + ctx, err := s.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + res, err := s.Stmt.ExecContext(ctx, args) + hookCtx.End(ctx, res, err) + if err := s.db.afterProcess(hookCtx); err != nil { + return nil, err + } + return res, nil +} + +func (s *Stmt) QueryContext(ctx context.Context, args ...interface{}) (*Rows, error) { + hookCtx := contexts.NewContextHook(ctx, s.query, args) + ctx, err := s.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + rows, err := s.Stmt.QueryContext(ctx, args...) + hookCtx.End(ctx, nil, err) + if err := s.db.afterProcess(hookCtx); err != nil { + return nil, err + } + return &Rows{rows, s.db}, nil +} + +func (s *Stmt) Query(args ...interface{}) (*Rows, error) { + return s.QueryContext(context.Background(), args...) +} + +func (s *Stmt) QueryMapContext(ctx context.Context, mp interface{}) (*Rows, error) { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() + } + + return s.QueryContext(ctx, args...) +} + +func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) { + return s.QueryMapContext(context.Background(), mp) +} + +func (s *Stmt) QueryStructContext(ctx context.Context, st interface{}) (*Rows, error) { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().FieldByName(k).Interface() + } + + return s.QueryContext(ctx, args...) +} + +func (s *Stmt) QueryStruct(st interface{}) (*Rows, error) { + return s.QueryStructContext(context.Background(), st) +} + +func (s *Stmt) QueryRowContext(ctx context.Context, args ...interface{}) *Row { + rows, err := s.QueryContext(ctx, args...) + return &Row{rows, err} +} + +func (s *Stmt) QueryRow(args ...interface{}) *Row { + return s.QueryRowContext(context.Background(), args...) +} + +func (s *Stmt) QueryRowMapContext(ctx context.Context, mp interface{}) *Row { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return &Row{nil, errors.New("mp should be a map's pointer")} + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() + } + + return s.QueryRowContext(ctx, args...) +} + +func (s *Stmt) QueryRowMap(mp interface{}) *Row { + return s.QueryRowMapContext(context.Background(), mp) +} + +func (s *Stmt) QueryRowStructContext(ctx context.Context, st interface{}) *Row { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return &Row{nil, errors.New("st should be a struct's pointer")} + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().FieldByName(k).Interface() + } + + return s.QueryRowContext(ctx, args...) 
+} + +func (s *Stmt) QueryRowStruct(st interface{}) *Row { + return s.QueryRowStructContext(context.Background(), st) +} diff --git a/vendor/xorm.io/xorm/core/tx.go b/vendor/xorm.io/xorm/core/tx.go new file mode 100644 index 0000000000..a85a687457 --- /dev/null +++ b/vendor/xorm.io/xorm/core/tx.go @@ -0,0 +1,219 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" + + "xorm.io/xorm/contexts" +) + +var ( + _ QueryExecuter = &Tx{} +) + +// Tx represents a transaction +type Tx struct { + *sql.Tx + db *DB + ctx context.Context +} + +func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + hookCtx := contexts.NewContextHook(ctx, "BEGIN TRANSACTION", nil) + ctx, err := db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + tx, err := db.DB.BeginTx(ctx, opts) + hookCtx.End(ctx, nil, err) + if err := db.afterProcess(hookCtx); err != nil { + return nil, err + } + return &Tx{tx, db, ctx}, nil +} + +func (db *DB) Begin() (*Tx, error) { + return db.BeginTx(context.Background(), nil) +} + +func (tx *Tx) Commit() error { + hookCtx := contexts.NewContextHook(tx.ctx, "COMMIT", nil) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return err + } + err = tx.Tx.Commit() + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return err + } + return nil +} + +func (tx *Tx) Rollback() error { + hookCtx := contexts.NewContextHook(tx.ctx, "ROLLBACK", nil) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return err + } + err = tx.Tx.Rollback() + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return err + } + return nil +} + +func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) { + names := make(map[string]int) + var i int + query = re.ReplaceAllStringFunc(query, func(src string) string { + names[src[1:]] = i + i++ + return "?" + }) + hookCtx := contexts.NewContextHook(ctx, "PREPARE", nil) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + stmt, err := tx.Tx.PrepareContext(ctx, query) + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return nil, err + } + return &Stmt{stmt, tx.db, names, query}, nil +} + +func (tx *Tx) Prepare(query string) (*Stmt, error) { + return tx.PrepareContext(context.Background(), query) +} + +func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt { + stmt.Stmt = tx.Tx.StmtContext(ctx, stmt.Stmt) + return stmt +} + +func (tx *Tx) Stmt(stmt *Stmt) *Stmt { + return tx.StmtContext(context.Background(), stmt) +} + +func (tx *Tx) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return tx.ExecContext(ctx, query, args...) +} + +func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) { + return tx.ExecMapContext(context.Background(), query, mp) +} + +func (tx *Tx) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return tx.ExecContext(ctx, query, args...) 
+} + +func (tx *Tx) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + res, err := tx.Tx.ExecContext(ctx, query, args...) + hookCtx.End(ctx, res, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + return nil, err + } + return res, err +} + +func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { + return tx.ExecStructContext(context.Background(), query, st) +} + +func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + hookCtx := contexts.NewContextHook(ctx, query, args) + ctx, err := tx.db.beforeProcess(hookCtx) + if err != nil { + return nil, err + } + rows, err := tx.Tx.QueryContext(ctx, query, args...) + hookCtx.End(ctx, nil, err) + if err := tx.db.afterProcess(hookCtx); err != nil { + if rows != nil { + rows.Close() + } + return nil, err + } + return &Rows{rows, tx.db}, nil +} + +func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) { + return tx.QueryContext(context.Background(), query, args...) +} + +func (tx *Tx) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return tx.QueryContext(ctx, query, args...) +} + +func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) { + return tx.QueryMapContext(context.Background(), query, mp) +} + +func (tx *Tx) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return tx.QueryContext(ctx, query, args...) +} + +func (tx *Tx) QueryStruct(query string, st interface{}) (*Rows, error) { + return tx.QueryStructContext(context.Background(), query, st) +} + +func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.QueryContext(ctx, query, args...) + return &Row{rows, err} +} + +func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { + return tx.QueryRowContext(context.Background(), query, args...) +} + +func (tx *Tx) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { + query, args, err := MapToSlice(query, mp) + if err != nil { + return &Row{nil, err} + } + return tx.QueryRowContext(ctx, query, args...) +} + +func (tx *Tx) QueryRowMap(query string, mp interface{}) *Row { + return tx.QueryRowMapContext(context.Background(), query, mp) +} + +func (tx *Tx) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { + query, args, err := StructToSlice(query, st) + if err != nil { + return &Row{nil, err} + } + return tx.QueryRowContext(ctx, query, args...) +} + +func (tx *Tx) QueryRowStruct(query string, st interface{}) *Row { + return tx.QueryRowStructContext(context.Background(), query, st) +} diff --git a/vendor/xorm.io/xorm/dialects/dialect.go b/vendor/xorm.io/xorm/dialects/dialect.go new file mode 100644 index 0000000000..dc96f73ae2 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/dialect.go @@ -0,0 +1,284 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
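// NOTE (editor): a hedged sketch of how this package's registries are used,
// mirroring regDrvsNDialects further down; myDriver and myDialect are
// hypothetical types (a Driver implementation and a Dialect embedding Base):
//
//	dialects.RegisterDriver("mydriver", &myDriver{})
//	dialects.RegisterDialect("mydb", func() dialects.Dialect { return &myDialect{} })
//
// OpenDialect (driver.go, below) then resolves a connection string to a
// Dialect by looking up the driver name and the parsed URI's DB type.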
+ +package dialects + +import ( + "context" + "fmt" + "strings" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// URI represents an uri to visit database +type URI struct { + DBType schemas.DBType + Proto string + Host string + Port string + DBName string + User string + Passwd string + Charset string + Laddr string + Raddr string + Timeout time.Duration + Schema string +} + +// SetSchema set schema +func (uri *URI) SetSchema(schema string) { + // hack me + if uri.DBType == schemas.POSTGRES { + uri.Schema = strings.TrimSpace(schema) + } +} + +// Dialect represents a kind of database +type Dialect interface { + Init(*URI) error + URI() *URI + SQLType(*schemas.Column) string + FormatBytes(b []byte) string + + IsReserved(string) bool + Quoter() schemas.Quoter + SetQuotePolicy(quotePolicy QuotePolicy) + + AutoIncrStr() string + + GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) + IndexCheckSQL(tableName, idxName string) (string, []interface{}) + CreateIndexSQL(tableName string, index *schemas.Index) string + DropIndexSQL(tableName string, index *schemas.Index) string + + GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) + IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) + CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) + DropTableSQL(tableName string) (string, bool) + + GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) + IsColumnExist(queryer core.Queryer, ctx context.Context, tableName string, colName string) (bool, error) + AddColumnSQL(tableName string, col *schemas.Column) string + ModifyColumnSQL(tableName string, col *schemas.Column) string + + ForUpdateSQL(query string) string + + Filters() []Filter + SetParams(params map[string]string) +} + +// Base represents a basic dialect and all real dialects could embed this struct +type Base struct { + dialect Dialect + uri *URI + quoter schemas.Quoter +} + +func (b *Base) Quoter() schemas.Quoter { + return b.quoter +} + +func (b *Base) Init(dialect Dialect, uri *URI) error { + b.dialect, b.uri = dialect, uri + return nil +} + +func (b *Base) URI() *URI { + return b.uri +} + +func (b *Base) DBType() schemas.DBType { + return b.uri.DBType +} + +func (b *Base) FormatBytes(bs []byte) string { + return fmt.Sprintf("0x%x", bs) +} + +func (db *Base) DropTableSQL(tableName string) (string, bool) { + quote := db.dialect.Quoter().Quote + return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName)), true +} + +func (db *Base) HasRecords(queryer core.Queryer, ctx context.Context, query string, args ...interface{}) (bool, error) { + rows, err := queryer.QueryContext(ctx, query, args...) + if err != nil { + return false, err + } + defer rows.Close() + + if rows.Next() { + return true, nil + } + return false, nil +} + +func (db *Base) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + quote := db.dialect.Quoter().Quote + query := fmt.Sprintf( + "SELECT %v FROM %v.%v WHERE %v = ? AND %v = ? 
AND %v = ?", + quote("COLUMN_NAME"), + quote("INFORMATION_SCHEMA"), + quote("COLUMNS"), + quote("TABLE_SCHEMA"), + quote("TABLE_NAME"), + quote("COLUMN_NAME"), + ) + return db.HasRecords(queryer, ctx, query, db.uri.DBName, tableName, colName) +} + +func (db *Base) AddColumnSQL(tableName string, col *schemas.Column) string { + s, _ := ColumnString(db.dialect, col, true) + return fmt.Sprintf("ALTER TABLE %v ADD %v", db.dialect.Quoter().Quote(tableName), s) +} + +func (db *Base) CreateIndexSQL(tableName string, index *schemas.Index) string { + quoter := db.dialect.Quoter() + var unique string + var idxName string + if index.Type == schemas.UniqueType { + unique = " UNIQUE" + } + idxName = index.XName(tableName) + return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v)", unique, + quoter.Quote(idxName), quoter.Quote(tableName), + quoter.Join(index.Cols, ",")) +} + +func (db *Base) DropIndexSQL(tableName string, index *schemas.Index) string { + quote := db.dialect.Quoter().Quote + var name string + if index.IsRegular { + name = index.XName(tableName) + } else { + name = index.Name + } + return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName)) +} + +func (db *Base) ModifyColumnSQL(tableName string, col *schemas.Column) string { + s, _ := ColumnString(db.dialect, col, false) + return fmt.Sprintf("alter table %s MODIFY COLUMN %s", tableName, s) +} + +func (b *Base) ForUpdateSQL(query string) string { + return query + " FOR UPDATE" +} + +func (b *Base) SetParams(params map[string]string) { +} + +var ( + dialects = map[string]func() Dialect{} +) + +// RegisterDialect register database dialect +func RegisterDialect(dbName schemas.DBType, dialectFunc func() Dialect) { + if dialectFunc == nil { + panic("core: Register dialect is nil") + } + dialects[strings.ToLower(string(dbName))] = dialectFunc // !nashtsai! allow override dialect +} + +// QueryDialect query if registered database dialect +func QueryDialect(dbName schemas.DBType) Dialect { + if d, ok := dialects[strings.ToLower(string(dbName))]; ok { + return d() + } + return nil +} + +func regDrvsNDialects() bool { + providedDrvsNDialects := map[string]struct { + dbType schemas.DBType + getDriver func() Driver + getDialect func() Dialect + }{ + "mssql": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, + "odbc": {"mssql", func() Driver { return &odbcDriver{} }, func() Dialect { return &mssql{} }}, // !nashtsai! 
TODO change this when supporting MS Access + "mysql": {"mysql", func() Driver { return &mysqlDriver{} }, func() Dialect { return &mysql{} }}, + "mymysql": {"mysql", func() Driver { return &mymysqlDriver{} }, func() Dialect { return &mysql{} }}, + "postgres": {"postgres", func() Driver { return &pqDriver{} }, func() Dialect { return &postgres{} }}, + "pgx": {"postgres", func() Driver { return &pqDriverPgx{} }, func() Dialect { return &postgres{} }}, + "sqlite3": {"sqlite3", func() Driver { return &sqlite3Driver{} }, func() Dialect { return &sqlite3{} }}, + "oci8": {"oracle", func() Driver { return &oci8Driver{} }, func() Dialect { return &oracle{} }}, + "goracle": {"oracle", func() Driver { return &goracleDriver{} }, func() Dialect { return &oracle{} }}, + } + + for driverName, v := range providedDrvsNDialects { + if driver := QueryDriver(driverName); driver == nil { + RegisterDriver(driverName, v.getDriver()) + RegisterDialect(v.dbType, v.getDialect) + } + } + return true +} + +func init() { + regDrvsNDialects() +} + +// ColumnString generate column description string according dialect +func ColumnString(dialect Dialect, col *schemas.Column, includePrimaryKey bool) (string, error) { + bd := strings.Builder{} + + if err := dialect.Quoter().QuoteTo(&bd, col.Name); err != nil { + return "", err + } + + if err := bd.WriteByte(' '); err != nil { + return "", err + } + + if _, err := bd.WriteString(dialect.SQLType(col)); err != nil { + return "", err + } + + if err := bd.WriteByte(' '); err != nil { + return "", err + } + + if includePrimaryKey && col.IsPrimaryKey { + if _, err := bd.WriteString("PRIMARY KEY "); err != nil { + return "", err + } + + if col.IsAutoIncrement { + if _, err := bd.WriteString(dialect.AutoIncrStr()); err != nil { + return "", err + } + if err := bd.WriteByte(' '); err != nil { + return "", err + } + } + } + + if col.Default != "" { + if _, err := bd.WriteString("DEFAULT "); err != nil { + return "", err + } + if _, err := bd.WriteString(col.Default); err != nil { + return "", err + } + if err := bd.WriteByte(' '); err != nil { + return "", err + } + } + + if col.Nullable { + if _, err := bd.WriteString("NULL "); err != nil { + return "", err + } + } else { + if _, err := bd.WriteString("NOT NULL "); err != nil { + return "", err + } + } + + return bd.String(), nil +} diff --git a/vendor/xorm.io/xorm/dialects/driver.go b/vendor/xorm.io/xorm/dialects/driver.go new file mode 100644 index 0000000000..ae3afe4254 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/driver.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dialects + +import ( + "fmt" +) + +type Driver interface { + Parse(string, string) (*URI, error) +} + +var ( + drivers = map[string]Driver{} +) + +func RegisterDriver(driverName string, driver Driver) { + if driver == nil { + panic("core: Register driver is nil") + } + if _, dup := drivers[driverName]; dup { + panic("core: Register called twice for driver " + driverName) + } + drivers[driverName] = driver +} + +func QueryDriver(driverName string) Driver { + return drivers[driverName] +} + +func RegisteredDriverSize() int { + return len(drivers) +} + +// OpenDialect opens a dialect via driver name and connection string +func OpenDialect(driverName, connstr string) (Dialect, error) { + driver := QueryDriver(driverName) + if driver == nil { + return nil, fmt.Errorf("Unsupported driver name: %v", driverName) + } + + uri, err := driver.Parse(driverName, connstr) + if err != nil { + return nil, err + } + + dialect := QueryDialect(uri.DBType) + if dialect == nil { + return nil, fmt.Errorf("Unsupported dialect type: %v", uri.DBType) + } + + dialect.Init(uri) + + return dialect, nil +} diff --git a/vendor/xorm.io/xorm/dialects/filter.go b/vendor/xorm.io/xorm/dialects/filter.go new file mode 100644 index 0000000000..6968b6ce86 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/filter.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "fmt" + "strings" +) + +// Filter is an interface to filter SQL +type Filter interface { + Do(sql string) string +} + +// SeqFilter filters SQL, replacing placeholders ?, ? ... with $1, $2 ... +type SeqFilter struct { + Prefix string + Start int +} + +func convertQuestionMark(sql, prefix string, start int) string { + var buf strings.Builder + var beginSingleQuote bool + var index = start + for _, c := range sql { + if !beginSingleQuote && c == '?' { + buf.WriteString(fmt.Sprintf("%s%v", prefix, index)) + index++ + } else { + if c == '\'' { + beginSingleQuote = !beginSingleQuote + } + buf.WriteRune(c) + } + } + return buf.String() +} + +func (s *SeqFilter) Do(sql string) string { + return convertQuestionMark(sql, s.Prefix, s.Start) +} diff --git a/vendor/xorm.io/xorm/dialects/gen_reserved.sh b/vendor/xorm.io/xorm/dialects/gen_reserved.sh new file mode 100644 index 0000000000..434a1bfcb0 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/gen_reserved.sh @@ -0,0 +1,6 @@ +#!/bin/bash +if [ -f $1 ];then + cat $1| awk '{printf("\""$1"\":true,\n")}' +else + echo "argument $1 is not a file!" +fi diff --git a/vendor/xorm.io/xorm/dialects/mssql.go b/vendor/xorm.io/xorm/dialects/mssql.go new file mode 100644 index 0000000000..f766950ca2 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/mssql.go @@ -0,0 +1,562 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
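// NOTE (editor): worked example for SeqFilter (filter.go above), not part of
// the vendored diff. With Prefix ":" and Start 1, as the oracle dialect
// installs, convertQuestionMark rewrites each unquoted '?' in sequence:
//
//	in:  SELECT * FROM user WHERE name = ? AND created > ?
//	out: SELECT * FROM user WHERE name = :1 AND created > :2
//
// A '?' inside a single-quoted literal such as 'a?b' passes through
// unchanged, because the loop tracks beginSingleQuote.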
+ +package dialects + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +var ( + mssqlReservedWords = map[string]bool{ + "ADD": true, + "EXTERNAL": true, + "PROCEDURE": true, + "ALL": true, + "FETCH": true, + "PUBLIC": true, + "ALTER": true, + "FILE": true, + "RAISERROR": true, + "AND": true, + "FILLFACTOR": true, + "READ": true, + "ANY": true, + "FOR": true, + "READTEXT": true, + "AS": true, + "FOREIGN": true, + "RECONFIGURE": true, + "ASC": true, + "FREETEXT": true, + "REFERENCES": true, + "AUTHORIZATION": true, + "FREETEXTTABLE": true, + "REPLICATION": true, + "BACKUP": true, + "FROM": true, + "RESTORE": true, + "BEGIN": true, + "FULL": true, + "RESTRICT": true, + "BETWEEN": true, + "FUNCTION": true, + "RETURN": true, + "BREAK": true, + "GOTO": true, + "REVERT": true, + "BROWSE": true, + "GRANT": true, + "REVOKE": true, + "BULK": true, + "GROUP": true, + "RIGHT": true, + "BY": true, + "HAVING": true, + "ROLLBACK": true, + "CASCADE": true, + "HOLDLOCK": true, + "ROWCOUNT": true, + "CASE": true, + "IDENTITY": true, + "ROWGUIDCOL": true, + "CHECK": true, + "IDENTITY_INSERT": true, + "RULE": true, + "CHECKPOINT": true, + "IDENTITYCOL": true, + "SAVE": true, + "CLOSE": true, + "IF": true, + "SCHEMA": true, + "CLUSTERED": true, + "IN": true, + "SECURITYAUDIT": true, + "COALESCE": true, + "INDEX": true, + "SELECT": true, + "COLLATE": true, + "INNER": true, + "SEMANTICKEYPHRASETABLE": true, + "COLUMN": true, + "INSERT": true, + "SEMANTICSIMILARITYDETAILSTABLE": true, + "COMMIT": true, + "INTERSECT": true, + "SEMANTICSIMILARITYTABLE": true, + "COMPUTE": true, + "INTO": true, + "SESSION_USER": true, + "CONSTRAINT": true, + "IS": true, + "SET": true, + "CONTAINS": true, + "JOIN": true, + "SETUSER": true, + "CONTAINSTABLE": true, + "KEY": true, + "SHUTDOWN": true, + "CONTINUE": true, + "KILL": true, + "SOME": true, + "CONVERT": true, + "LEFT": true, + "STATISTICS": true, + "CREATE": true, + "LIKE": true, + "SYSTEM_USER": true, + "CROSS": true, + "LINENO": true, + "TABLE": true, + "CURRENT": true, + "LOAD": true, + "TABLESAMPLE": true, + "CURRENT_DATE": true, + "MERGE": true, + "TEXTSIZE": true, + "CURRENT_TIME": true, + "NATIONAL": true, + "THEN": true, + "CURRENT_TIMESTAMP": true, + "NOCHECK": true, + "TO": true, + "CURRENT_USER": true, + "NONCLUSTERED": true, + "TOP": true, + "CURSOR": true, + "NOT": true, + "TRAN": true, + "DATABASE": true, + "NULL": true, + "TRANSACTION": true, + "DBCC": true, + "NULLIF": true, + "TRIGGER": true, + "DEALLOCATE": true, + "OF": true, + "TRUNCATE": true, + "DECLARE": true, + "OFF": true, + "TRY_CONVERT": true, + "DEFAULT": true, + "OFFSETS": true, + "TSEQUAL": true, + "DELETE": true, + "ON": true, + "UNION": true, + "DENY": true, + "OPEN": true, + "UNIQUE": true, + "DESC": true, + "OPENDATASOURCE": true, + "UNPIVOT": true, + "DISK": true, + "OPENQUERY": true, + "UPDATE": true, + "DISTINCT": true, + "OPENROWSET": true, + "UPDATETEXT": true, + "DISTRIBUTED": true, + "OPENXML": true, + "USE": true, + "DOUBLE": true, + "OPTION": true, + "USER": true, + "DROP": true, + "OR": true, + "VALUES": true, + "DUMP": true, + "ORDER": true, + "VARYING": true, + "ELSE": true, + "OUTER": true, + "VIEW": true, + "END": true, + "OVER": true, + "WAITFOR": true, + "ERRLVL": true, + "PERCENT": true, + "WHEN": true, + "ESCAPE": true, + "PIVOT": true, + "WHERE": true, + "EXCEPT": true, + "PLAN": true, + "WHILE": true, + "EXEC": true, + "PRECISION": true, + "WITH": true, + "EXECUTE": true, + "PRIMARY": true, 
+ "WITHIN": true, + "EXISTS": true, + "PRINT": true, + "WRITETEXT": true, + "EXIT": true, + "PROC": true, + } + + mssqlQuoter = schemas.Quoter{ + Prefix: '[', + Suffix: ']', + IsReserved: schemas.AlwaysReserve, + } +) + +type mssql struct { + Base +} + +func (db *mssql) Init(uri *URI) error { + db.quoter = mssqlQuoter + return db.Base.Init(db, uri) +} + +func (db *mssql) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.Bool: + res = schemas.Bit + if strings.EqualFold(c.Default, "true") { + c.Default = "1" + } else if strings.EqualFold(c.Default, "false") { + c.Default = "0" + } + return res + case schemas.Serial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.Int + case schemas.BigSerial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.BigInt + case schemas.Bytea, schemas.Blob, schemas.Binary, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: + res = schemas.VarBinary + if c.Length == 0 { + c.Length = 50 + } + case schemas.TimeStamp: + res = schemas.DateTime + case schemas.TimeStampz: + res = "DATETIMEOFFSET" + c.Length = 7 + case schemas.MediumInt: + res = schemas.Int + case schemas.Text, schemas.MediumText, schemas.TinyText, schemas.LongText, schemas.Json: + res = schemas.Varchar + "(MAX)" + case schemas.Double: + res = schemas.Real + case schemas.Uuid: + res = schemas.Varchar + c.Length = 40 + case schemas.TinyInt: + res = schemas.TinyInt + c.Length = 0 + case schemas.BigInt: + res = schemas.BigInt + c.Length = 0 + default: + res = t + } + + if res == schemas.Int || res == schemas.Bit || res == schemas.DateTime { + return res + } + + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *mssql) IsReserved(name string) bool { + _, ok := mssqlReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *mssql) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = mssqlQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = mssqlQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = mssqlQuoter + } +} + +func (db *mssql) AutoIncrStr() string { + return "IDENTITY" +} + +func (db *mssql) DropTableSQL(tableName string) (string, bool) { + return fmt.Sprintf("IF EXISTS (SELECT * FROM sysobjects WHERE id = "+ + "object_id(N'%s') and OBJECTPROPERTY(id, N'IsUserTable') = 1) "+ + "DROP TABLE \"%s\"", tableName, tableName), true +} + +func (db *mssql) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{idxName} + sql := "select name from sysindexes where id=object_id('" + tableName + "') and name=?" + return sql, args +} + +func (db *mssql) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + query := `SELECT "COLUMN_NAME" FROM "INFORMATION_SCHEMA"."COLUMNS" WHERE "TABLE_NAME" = ? 
AND "COLUMN_NAME" = ?` + + return db.HasRecords(queryer, ctx, query, tableName, colName) +} + +func (db *mssql) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + sql := "select * from sysobjects where id = object_id(N'" + tableName + "') and OBJECTPROPERTY(id, N'IsUserTable') = 1" + return db.HasRecords(queryer, ctx, sql) +} + +func (db *mssql) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{} + s := `select a.name as name, b.name as ctype,a.max_length,a.precision,a.scale,a.is_nullable as nullable, + "default_is_null" = (CASE WHEN c.text is null THEN 1 ELSE 0 END), + replace(replace(isnull(c.text,''),'(',''),')','') as vdefault, + ISNULL(p.is_primary_key, 0), a.is_identity as is_identity + from sys.columns a + left join sys.types b on a.user_type_id=b.user_type_id + left join sys.syscomments c on a.default_object_id=c.id + LEFT OUTER JOIN (SELECT i.object_id, ic.column_id, i.is_primary_key + FROM sys.indexes i + LEFT JOIN sys.index_columns ic ON ic.object_id = i.object_id AND ic.index_id = i.index_id + WHERE i.is_primary_key = 1 + ) as p on p.object_id = a.object_id AND p.column_id = a.column_id + where a.object_id=object_id('` + tableName + `')` + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + for rows.Next() { + var name, ctype, vdefault string + var maxLen, precision, scale int + var nullable, isPK, defaultIsNull, isIncrement bool + err = rows.Scan(&name, &ctype, &maxLen, &precision, &scale, &nullable, &defaultIsNull, &vdefault, &isPK, &isIncrement) + if err != nil { + return nil, nil, err + } + + col := new(schemas.Column) + col.Indexes = make(map[string]int) + col.Name = strings.Trim(name, "` ") + col.Nullable = nullable + col.DefaultIsEmpty = defaultIsNull + if !defaultIsNull { + col.Default = vdefault + } + col.IsPrimaryKey = isPK + col.IsAutoIncrement = isIncrement + ct := strings.ToUpper(ctype) + if ct == "DECIMAL" { + col.Length = precision + col.Length2 = scale + } else { + col.Length = maxLen + } + switch ct { + case "DATETIMEOFFSET": + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + case "NVARCHAR": + col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: 0, DefaultLength2: 0} + case "IMAGE": + col.SQLType = schemas.SQLType{Name: schemas.VarBinary, DefaultLength: 0, DefaultLength2: 0} + default: + if _, ok := schemas.SqlTypes[ct]; ok { + col.SQLType = schemas.SQLType{Name: ct, DefaultLength: 0, DefaultLength2: 0} + } else { + return nil, nil, fmt.Errorf("Unknown colType %v for %v - %v", ct, tableName, col.Name) + } + } + + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + return colSeq, cols, nil +} + +func (db *mssql) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := `select name from sysobjects where xtype ='U'` + + rows, err := queryer.QueryContext(ctx, s, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + var name string + err = rows.Scan(&name) + if err != nil { + return nil, err + } + table.Name = strings.Trim(name, "` ") + tables = append(tables, table) + } + return tables, nil +} + +func (db *mssql) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{tableName} + s := `SELECT +IXS.NAME AS [INDEX_NAME], +C.NAME AS [COLUMN_NAME], +IXS.is_unique AS [IS_UNIQUE] +FROM SYS.INDEXES IXS +INNER JOIN SYS.INDEX_COLUMNS IXCS +ON IXS.OBJECT_ID=IXCS.OBJECT_ID AND IXS.INDEX_ID = IXCS.INDEX_ID +INNER JOIN SYS.COLUMNS C ON IXS.OBJECT_ID=C.OBJECT_ID +AND IXCS.COLUMN_ID=C.COLUMN_ID +WHERE IXS.TYPE_DESC='NONCLUSTERED' and OBJECT_NAME(IXS.OBJECT_ID) =? +` + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var indexType int + var indexName, colName, isUnique string + + err = rows.Scan(&indexName, &colName, &isUnique) + if err != nil { + return nil, err + } + + i, err := strconv.ParseBool(isUnique) + if err != nil { + return nil, err + } + + if i { + indexType = schemas.UniqueType + } else { + indexType = schemas.IndexType + } + + colName = strings.Trim(colName, "` ") + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + indexName = indexName[5+len(tableName):] + isRegular = true + } + + var index *schemas.Index + var ok bool + if index, ok = indexes[indexName]; !ok { + index = new(schemas.Index) + index.Type = indexType + index.Name = indexName + index.IsRegular = isRegular + indexes[indexName] = index + } + index.AddColumn(colName) + } + return indexes, nil +} + +func (db *mssql) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + if tableName == "" { + tableName = table.Name + } + + sql = "IF NOT EXISTS (SELECT [name] FROM sys.tables WHERE [name] = '" + tableName + "' ) CREATE TABLE " + + sql += db.Quoter().Quote(tableName) + " (" + + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += strings.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + ")" + sql += ";" + return []string{sql}, true +} + +func (db *mssql) ForUpdateSQL(query string) string { + return query +} + +func (db *mssql) Filters() []Filter { + return []Filter{} +} + +type odbcDriver struct { +} + +func (p *odbcDriver) Parse(driverName, dataSourceName string) (*URI, error) { + var dbName string + + if strings.HasPrefix(dataSourceName, "sqlserver://") { + u, err := url.Parse(dataSourceName) + if err != nil { + return nil, err + } + dbName = u.Query().Get("database") + } else { + kv := strings.Split(dataSourceName, ";") + for _, c := range kv { + vv := strings.Split(strings.TrimSpace(c), "=") + if len(vv) == 2 { + switch strings.ToLower(vv[0]) { + case "database": + dbName = vv[1] + } + } + } + } + if dbName == "" { + return nil, errors.New("no db name provided") + } + return &URI{DBName: dbName, DBType: schemas.MSSQL}, nil +} diff --git a/vendor/xorm.io/xorm/dialects/mysql.go 
b/vendor/xorm.io/xorm/dialects/mysql.go new file mode 100644 index 0000000000..f9a2e9434d --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/mysql.go @@ -0,0 +1,662 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +var ( + mysqlReservedWords = map[string]bool{ + "ADD": true, + "ALL": true, + "ALTER": true, + "ANALYZE": true, + "AND": true, + "AS": true, + "ASC": true, + "ASENSITIVE": true, + "BEFORE": true, + "BETWEEN": true, + "BIGINT": true, + "BINARY": true, + "BLOB": true, + "BOTH": true, + "BY": true, + "CALL": true, + "CASCADE": true, + "CASE": true, + "CHANGE": true, + "CHAR": true, + "CHARACTER": true, + "CHECK": true, + "COLLATE": true, + "COLUMN": true, + "CONDITION": true, + "CONNECTION": true, + "CONSTRAINT": true, + "CONTINUE": true, + "CONVERT": true, + "CREATE": true, + "CROSS": true, + "CURRENT_DATE": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "CURRENT_USER": true, + "CURSOR": true, + "DATABASE": true, + "DATABASES": true, + "DAY_HOUR": true, + "DAY_MICROSECOND": true, + "DAY_MINUTE": true, + "DAY_SECOND": true, + "DEC": true, + "DECIMAL": true, + "DECLARE": true, + "DEFAULT": true, + "DELAYED": true, + "DELETE": true, + "DESC": true, + "DESCRIBE": true, + "DETERMINISTIC": true, + "DISTINCT": true, + "DISTINCTROW": true, + "DIV": true, + "DOUBLE": true, + "DROP": true, + "DUAL": true, + "EACH": true, + "ELSE": true, + "ELSEIF": true, + "ENCLOSED": true, + "ESCAPED": true, + "EXISTS": true, + "EXIT": true, + "EXPLAIN": true, + "FALSE": true, + "FETCH": true, + "FLOAT": true, + "FLOAT4": true, + "FLOAT8": true, + "FOR": true, + "FORCE": true, + "FOREIGN": true, + "FROM": true, + "FULLTEXT": true, + "GOTO": true, + "GRANT": true, + "GROUP": true, + "HAVING": true, + "HIGH_PRIORITY": true, + "HOUR_MICROSECOND": true, + "HOUR_MINUTE": true, + "HOUR_SECOND": true, + "IF": true, + "IGNORE": true, + "IN": true, "INDEX": true, + "INFILE": true, "INNER": true, "INOUT": true, + "INSENSITIVE": true, "INSERT": true, "INT": true, + "INT1": true, "INT2": true, "INT3": true, + "INT4": true, "INT8": true, "INTEGER": true, + "INTERVAL": true, "INTO": true, "IS": true, + "ITERATE": true, "JOIN": true, "KEY": true, + "KEYS": true, "KILL": true, "LABEL": true, + "LEADING": true, "LEAVE": true, "LEFT": true, + "LIKE": true, "LIMIT": true, "LINEAR": true, + "LINES": true, "LOAD": true, "LOCALTIME": true, + "LOCALTIMESTAMP": true, "LOCK": true, "LONG": true, + "LONGBLOB": true, "LONGTEXT": true, "LOOP": true, + "LOW_PRIORITY": true, "MATCH": true, "MEDIUMBLOB": true, + "MEDIUMINT": true, "MEDIUMTEXT": true, "MIDDLEINT": true, + "MINUTE_MICROSECOND": true, "MINUTE_SECOND": true, "MOD": true, + "MODIFIES": true, "NATURAL": true, "NOT": true, + "NO_WRITE_TO_BINLOG": true, "NULL": true, "NUMERIC": true, + "ON": true, "OPTIMIZE": true, "OPTION": true, + "OPTIONALLY": true, "OR": true, "ORDER": true, + "OUT": true, "OUTER": true, "OUTFILE": true, + "PRECISION": true, "PRIMARY": true, "PROCEDURE": true, + "PURGE": true, "RAID0": true, "RANGE": true, + "READ": true, "READS": true, "REAL": true, + "REFERENCES": true, "REGEXP": true, "RELEASE": true, + "RENAME": true, "REPEAT": true, "REPLACE": true, + "REQUIRE": true, "RESTRICT": true, "RETURN": true, + "REVOKE": true, "RIGHT": true, "RLIKE": true, + "SCHEMA":
true, "SCHEMAS": true, "SECOND_MICROSECOND": true, + "SELECT": true, "SENSITIVE": true, "SEPARATOR": true, + "SET": true, "SHOW": true, "SMALLINT": true, + "SPATIAL": true, "SPECIFIC": true, "SQL": true, + "SQLEXCEPTION": true, "SQLSTATE": true, "SQLWARNING": true, + "SQL_BIG_RESULT": true, "SQL_CALC_FOUND_ROWS": true, "SQL_SMALL_RESULT": true, + "SSL": true, "STARTING": true, "STRAIGHT_JOIN": true, + "TABLE": true, "TERMINATED": true, "THEN": true, + "TINYBLOB": true, "TINYINT": true, "TINYTEXT": true, + "TO": true, "TRAILING": true, "TRIGGER": true, + "TRUE": true, "UNDO": true, "UNION": true, + "UNIQUE": true, "UNLOCK": true, "UNSIGNED": true, + "UPDATE": true, "USAGE": true, "USE": true, + "USING": true, "UTC_DATE": true, "UTC_TIME": true, + "UTC_TIMESTAMP": true, "VALUES": true, "VARBINARY": true, + "VARCHAR": true, + "VARCHARACTER": true, + "VARYING": true, + "WHEN": true, + "WHERE": true, + "WHILE": true, + "WITH": true, + "WRITE": true, + "X509": true, + "XOR": true, + "YEAR_MONTH": true, + "ZEROFILL": true, + } + + mysqlQuoter = schemas.Quoter{ + Prefix: '`', + Suffix: '`', + IsReserved: schemas.AlwaysReserve, + } +) + +type mysql struct { + Base + net string + addr string + params map[string]string + loc *time.Location + timeout time.Duration + tls *tls.Config + allowAllFiles bool + allowOldPasswords bool + clientFoundRows bool + rowFormat string +} + +func (db *mysql) Init(uri *URI) error { + db.quoter = mysqlQuoter + return db.Base.Init(db, uri) +} + +func (db *mysql) SetParams(params map[string]string) { + rowFormat, ok := params["rowFormat"] + if ok { + var t = strings.ToUpper(rowFormat) + switch t { + case "COMPACT": + fallthrough + case "REDUNDANT": + fallthrough + case "DYNAMIC": + fallthrough + case "COMPRESSED": + db.rowFormat = t + break + default: + break + } + } +} + +func (db *mysql) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.Bool: + res = schemas.TinyInt + c.Length = 1 + case schemas.Serial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.Int + case schemas.BigSerial: + c.IsAutoIncrement = true + c.IsPrimaryKey = true + c.Nullable = false + res = schemas.BigInt + case schemas.Bytea: + res = schemas.Blob + case schemas.TimeStampz: + res = schemas.Char + c.Length = 64 + case schemas.Enum: // mysql enum + res = schemas.Enum + res += "(" + opts := "" + for v := range c.EnumOptions { + opts += fmt.Sprintf(",'%v'", v) + } + res += strings.TrimLeft(opts, ",") + res += ")" + case schemas.Set: // mysql set + res = schemas.Set + res += "(" + opts := "" + for v := range c.SetOptions { + opts += fmt.Sprintf(",'%v'", v) + } + res += strings.TrimLeft(opts, ",") + res += ")" + case schemas.NVarchar: + res = schemas.Varchar + case schemas.Uuid: + res = schemas.Varchar + c.Length = 40 + case schemas.Json: + res = schemas.Text + default: + res = t + } + + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if res == schemas.BigInt && !hasLen1 && !hasLen2 { + c.Length = 20 + hasLen1 = true + } + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *mysql) IsReserved(name string) bool { + _, ok := mysqlReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *mysql) AutoIncrStr() string { + return "AUTO_INCREMENT" +} + +func (db *mysql) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{db.uri.DBName, 
tableName, idxName} + sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`" + sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?" + return sql, args +} + +func (db *mysql) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" + return db.HasRecords(queryer, ctx, sql, db.uri.DBName, tableName) +} + +func (db *mysql) AddColumnSQL(tableName string, col *schemas.Column) string { + quoter := db.dialect.Quoter() + s, _ := ColumnString(db, col, true) + sql := fmt.Sprintf("ALTER TABLE %v ADD %v", quoter.Quote(tableName), s) + if len(col.Comment) > 0 { + sql += " COMMENT '" + col.Comment + "'" + } + return sql +} + +func (db *mysql) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{db.uri.DBName, tableName} + s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," + + " `COLUMN_KEY`, `EXTRA`,`COLUMN_COMMENT` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" + + " ORDER BY `INFORMATION_SCHEMA`.`COLUMNS`.ORDINAL_POSITION" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + for rows.Next() { + col := new(schemas.Column) + col.Indexes = make(map[string]int) + + var columnName, isNullable, colType, colKey, extra, comment string + var colDefault *string + err = rows.Scan(&columnName, &isNullable, &colDefault, &colType, &colKey, &extra, &comment) + if err != nil { + return nil, nil, err + } + col.Name = strings.Trim(columnName, "` ") + col.Comment = comment + if "YES" == isNullable { + col.Nullable = true + } + + if colDefault != nil { + col.Default = *colDefault + col.DefaultIsEmpty = false + } else { + col.DefaultIsEmpty = true + } + + cts := strings.Split(colType, "(") + colName := cts[0] + colType = strings.ToUpper(colName) + var len1, len2 int + if len(cts) == 2 { + idx := strings.Index(cts[1], ")") + if colType == schemas.Enum && cts[1][0] == '\'' { // enum + options := strings.Split(cts[1][0:idx], ",") + col.EnumOptions = make(map[string]int) + for k, v := range options { + v = strings.TrimSpace(v) + v = strings.Trim(v, "'") + col.EnumOptions[v] = k + } + } else if colType == schemas.Set && cts[1][0] == '\'' { + options := strings.Split(cts[1][0:idx], ",") + col.SetOptions = make(map[string]int) + for k, v := range options { + v = strings.TrimSpace(v) + v = strings.Trim(v, "'") + col.SetOptions[v] = k + } + } else { + lens := strings.Split(cts[1][0:idx], ",") + len1, err = strconv.Atoi(strings.TrimSpace(lens[0])) + if err != nil { + return nil, nil, err + } + if len(lens) == 2 { + len2, err = strconv.Atoi(lens[1]) + if err != nil { + return nil, nil, err + } + } + } + } + if colType == "FLOAT UNSIGNED" { + colType = "FLOAT" + } + if colType == "DOUBLE UNSIGNED" { + colType = "DOUBLE" + } + col.Length = len1 + col.Length2 = len2 + if _, ok := schemas.SqlTypes[colType]; ok { + col.SQLType = schemas.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2} + } else { + return nil, nil, fmt.Errorf("Unknown colType %v", colType) + } + + if colKey == "PRI" { + col.IsPrimaryKey = true + } + if colKey == "UNI" { + // col.is + } + + if extra == "auto_increment" { + col.IsAutoIncrement = true + } + + if !col.DefaultIsEmpty { + if 
col.SQLType.IsText() { + col.Default = "'" + col.Default + "'" + } else if col.SQLType.IsTime() && col.Default != "CURRENT_TIMESTAMP" { + col.Default = "'" + col.Default + "'" + } + } + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + return colSeq, cols, nil +} + +func (db *mysql) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{db.uri.DBName} + s := "SELECT `TABLE_NAME`, `ENGINE`, `AUTO_INCREMENT`, `TABLE_COMMENT` from " + + "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB' OR `ENGINE` = 'TokuDB')" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + var name, engine string + var autoIncr, comment *string + err = rows.Scan(&name, &engine, &autoIncr, &comment) + if err != nil { + return nil, err + } + + table.Name = name + if comment != nil { + table.Comment = *comment + } + table.StoreEngine = engine + tables = append(tables, table) + } + return tables, nil +} + +func (db *mysql) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = mysqlQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = mysqlQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = mysqlQuoter + } +} + +func (db *mysql) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{db.uri.DBName, tableName} + s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" + + rows, err := queryer.QueryContext(ctx, s, args...) 
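// NOTE (editor): illustrative note, not part of the vendored diff.
// SetQuotePolicy (above) only swaps the quoter's IsReserved predicate; the
// observable effect on MySQL identifiers, assuming a column named "select":
//
//	QuotePolicyAlways:   `user`.`select`  (every identifier quoted)
//	QuotePolicyReserved: user.`select`    (only reserved words quoted)
//	QuotePolicyNone:     user.select      (nothing quoted)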
+ if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var indexType int + var indexName, colName, nonUnique string + err = rows.Scan(&indexName, &nonUnique, &colName) + if err != nil { + return nil, err + } + + if indexName == "PRIMARY" { + continue + } + + if "YES" == nonUnique || nonUnique == "1" { + indexType = schemas.IndexType + } else { + indexType = schemas.UniqueType + } + + colName = strings.Trim(colName, "` ") + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + indexName = indexName[5+len(tableName):] + isRegular = true + } + + var index *schemas.Index + var ok bool + if index, ok = indexes[indexName]; !ok { + index = new(schemas.Index) + index.IsRegular = isRegular + index.Type = indexType + index.Name = indexName + indexes[indexName] = index + } + index.AddColumn(colName) + } + return indexes, nil +} + +func (db *mysql) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + + sql += quoter.Quote(tableName) + sql += " (" + + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + if len(col.Comment) > 0 { + sql += " COMMENT '" + col.Comment + "'" + } + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + if table.StoreEngine != "" { + sql += " ENGINE=" + table.StoreEngine + } + + var charset = table.Charset + if len(charset) == 0 { + charset = db.URI().Charset + } + if len(charset) != 0 { + sql += " DEFAULT CHARSET " + charset + } + + if db.rowFormat != "" { + sql += " ROW_FORMAT=" + db.rowFormat + } + return []string{sql}, true +} + +func (db *mysql) Filters() []Filter { + return []Filter{} +} + +type mymysqlDriver struct { +} + +func (p *mymysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) { + uri := &URI{DBType: schemas.MYSQL} + + pd := strings.SplitN(dataSourceName, "*", 2) + if len(pd) == 2 { + // Parse protocol part of URI + p := strings.SplitN(pd[0], ":", 2) + if len(p) != 2 { + return nil, errors.New("Wrong protocol part of URI") + } + uri.Proto = p[0] + options := strings.Split(p[1], ",") + uri.Raddr = options[0] + for _, o := range options[1:] { + kv := strings.SplitN(o, "=", 2) + var k, v string + if len(kv) == 2 { + k, v = kv[0], kv[1] + } else { + k, v = o, "true" + } + switch k { + case "laddr": + uri.Laddr = v + case "timeout": + to, err := time.ParseDuration(v) + if err != nil { + return nil, err + } + uri.Timeout = to + default: + return nil, errors.New("Unknown option: " + k) + } + } + // Remove protocol part + pd = pd[1:] + } + // Parse database part of URI + dup := strings.SplitN(pd[0], "/", 3) + if len(dup) != 3 { + return nil, errors.New("Wrong database part of URI") + } + uri.DBName = dup[0] + uri.User = dup[1] + uri.Passwd = dup[2] + + return uri, nil +} + +type mysqlDriver struct { +} + +func (p *mysqlDriver) Parse(driverName, dataSourceName string) (*URI, error) { + dsnPattern := regexp.MustCompile( + `^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@] + `(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]] + `\/(?P<dbname>.*?)` + // /dbname
+ `(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1&paramN=valueN] + matches := dsnPattern.FindStringSubmatch(dataSourceName) + // tlsConfigRegister := make(map[string]*tls.Config) + names := dsnPattern.SubexpNames() + + uri := &URI{DBType: schemas.MYSQL} + + for i, match := range matches { + switch names[i] { + case "dbname": + uri.DBName = match + case "params": + if len(match) > 0 { + kvs := strings.Split(match, "&") + for _, kv := range kvs { + splits := strings.Split(kv, "=") + if len(splits) == 2 { + switch splits[0] { + case "charset": + uri.Charset = splits[1] + } + } + } + } + + } + } + return uri, nil +} diff --git a/vendor/xorm.io/xorm/dialects/oracle.go b/vendor/xorm.io/xorm/dialects/oracle.go new file mode 100644 index 0000000000..91eed25169 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/oracle.go @@ -0,0 +1,854 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "context" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +var ( + oracleReservedWords = map[string]bool{ + "ACCESS": true, + "ACCOUNT": true, + "ACTIVATE": true, + "ADD": true, + "ADMIN": true, + "ADVISE": true, + "AFTER": true, + "ALL": true, + "ALL_ROWS": true, + "ALLOCATE": true, + "ALTER": true, + "ANALYZE": true, + "AND": true, + "ANY": true, + "ARCHIVE": true, + "ARCHIVELOG": true, + "ARRAY": true, + "AS": true, + "ASC": true, + "AT": true, + "AUDIT": true, + "AUTHENTICATED": true, + "AUTHORIZATION": true, + "AUTOEXTEND": true, + "AUTOMATIC": true, + "BACKUP": true, + "BECOME": true, + "BEFORE": true, + "BEGIN": true, + "BETWEEN": true, + "BFILE": true, + "BITMAP": true, + "BLOB": true, + "BLOCK": true, + "BODY": true, + "BY": true, + "CACHE": true, + "CACHE_INSTANCES": true, + "CANCEL": true, + "CASCADE": true, + "CAST": true, + "CFILE": true, + "CHAINED": true, + "CHANGE": true, + "CHAR": true, + "CHAR_CS": true, + "CHARACTER": true, + "CHECK": true, + "CHECKPOINT": true, + "CHOOSE": true, + "CHUNK": true, + "CLEAR": true, + "CLOB": true, + "CLONE": true, + "CLOSE": true, + "CLOSE_CACHED_OPEN_CURSORS": true, + "CLUSTER": true, + "COALESCE": true, + "COLUMN": true, + "COLUMNS": true, + "COMMENT": true, + "COMMIT": true, + "COMMITTED": true, + "COMPATIBILITY": true, + "COMPILE": true, + "COMPLETE": true, + "COMPOSITE_LIMIT": true, + "COMPRESS": true, + "COMPUTE": true, + "CONNECT": true, + "CONNECT_TIME": true, + "CONSTRAINT": true, + "CONSTRAINTS": true, + "CONTENTS": true, + "CONTINUE": true, + "CONTROLFILE": true, + "CONVERT": true, + "COST": true, + "CPU_PER_CALL": true, + "CPU_PER_SESSION": true, + "CREATE": true, + "CURRENT": true, + "CURRENT_SCHEMA": true, + "CURREN_USER": true, + "CURSOR": true, + "CYCLE": true, + "DANGLING": true, + "DATABASE": true, + "DATAFILE": true, + "DATAFILES": true, + "DATAOBJNO": true, + "DATE": true, + "DBA": true, + "DBHIGH": true, + "DBLOW": true, + "DBMAC": true, + "DEALLOCATE": true, + "DEBUG": true, + "DEC": true, + "DECIMAL": true, + "DECLARE": true, + "DEFAULT": true, + "DEFERRABLE": true, + "DEFERRED": true, + "DEGREE": true, + "DELETE": true, + "DEREF": true, + "DESC": true, + "DIRECTORY": true, + "DISABLE": true, + "DISCONNECT": true, + "DISMOUNT": true, + "DISTINCT": true, + "DISTRIBUTED": true, + "DML": true, + "DOUBLE": true, + "DROP": true, + "DUMP": true, + "EACH": true, + "ELSE": true, + "ENABLE": true, + "END": true, + "ENFORCE": true, + "ENTRY": true, +
"ESCAPE": true, + "EXCEPT": true, + "EXCEPTIONS": true, + "EXCHANGE": true, + "EXCLUDING": true, + "EXCLUSIVE": true, + "EXECUTE": true, + "EXISTS": true, + "EXPIRE": true, + "EXPLAIN": true, + "EXTENT": true, + "EXTENTS": true, + "EXTERNALLY": true, + "FAILED_LOGIN_ATTEMPTS": true, + "FALSE": true, + "FAST": true, + "FILE": true, + "FIRST_ROWS": true, + "FLAGGER": true, + "FLOAT": true, + "FLOB": true, + "FLUSH": true, + "FOR": true, + "FORCE": true, + "FOREIGN": true, + "FREELIST": true, + "FREELISTS": true, + "FROM": true, + "FULL": true, + "FUNCTION": true, + "GLOBAL": true, + "GLOBALLY": true, + "GLOBAL_NAME": true, + "GRANT": true, + "GROUP": true, + "GROUPS": true, + "HASH": true, + "HASHKEYS": true, + "HAVING": true, + "HEADER": true, + "HEAP": true, + "IDENTIFIED": true, + "IDGENERATORS": true, + "IDLE_TIME": true, + "IF": true, + "IMMEDIATE": true, + "IN": true, + "INCLUDING": true, + "INCREMENT": true, + "INDEX": true, + "INDEXED": true, + "INDEXES": true, + "INDICATOR": true, + "IND_PARTITION": true, + "INITIAL": true, + "INITIALLY": true, + "INITRANS": true, + "INSERT": true, + "INSTANCE": true, + "INSTANCES": true, + "INSTEAD": true, + "INT": true, + "INTEGER": true, + "INTERMEDIATE": true, + "INTERSECT": true, + "INTO": true, + "IS": true, + "ISOLATION": true, + "ISOLATION_LEVEL": true, + "KEEP": true, + "KEY": true, + "KILL": true, + "LABEL": true, + "LAYER": true, + "LESS": true, + "LEVEL": true, + "LIBRARY": true, + "LIKE": true, + "LIMIT": true, + "LINK": true, + "LIST": true, + "LOB": true, + "LOCAL": true, + "LOCK": true, + "LOCKED": true, + "LOG": true, + "LOGFILE": true, + "LOGGING": true, + "LOGICAL_READS_PER_CALL": true, + "LOGICAL_READS_PER_SESSION": true, + "LONG": true, + "MANAGE": true, + "MASTER": true, + "MAX": true, + "MAXARCHLOGS": true, + "MAXDATAFILES": true, + "MAXEXTENTS": true, + "MAXINSTANCES": true, + "MAXLOGFILES": true, + "MAXLOGHISTORY": true, + "MAXLOGMEMBERS": true, + "MAXSIZE": true, + "MAXTRANS": true, + "MAXVALUE": true, + "MIN": true, + "MEMBER": true, + "MINIMUM": true, + "MINEXTENTS": true, + "MINUS": true, + "MINVALUE": true, + "MLSLABEL": true, + "MLS_LABEL_FORMAT": true, + "MODE": true, + "MODIFY": true, + "MOUNT": true, + "MOVE": true, + "MTS_DISPATCHERS": true, + "MULTISET": true, + "NATIONAL": true, + "NCHAR": true, + "NCHAR_CS": true, + "NCLOB": true, + "NEEDED": true, + "NESTED": true, + "NETWORK": true, + "NEW": true, + "NEXT": true, + "NOARCHIVELOG": true, + "NOAUDIT": true, + "NOCACHE": true, + "NOCOMPRESS": true, + "NOCYCLE": true, + "NOFORCE": true, + "NOLOGGING": true, + "NOMAXVALUE": true, + "NOMINVALUE": true, + "NONE": true, + "NOORDER": true, + "NOOVERRIDE": true, + "NOPARALLEL": true, + "NOREVERSE": true, + "NORMAL": true, + "NOSORT": true, + "NOT": true, + "NOTHING": true, + "NOWAIT": true, + "NULL": true, + "NUMBER": true, + "NUMERIC": true, + "NVARCHAR2": true, + "OBJECT": true, + "OBJNO": true, + "OBJNO_REUSE": true, + "OF": true, + "OFF": true, + "OFFLINE": true, + "OID": true, + "OIDINDEX": true, + "OLD": true, + "ON": true, + "ONLINE": true, + "ONLY": true, + "OPCODE": true, + "OPEN": true, + "OPTIMAL": true, + "OPTIMIZER_GOAL": true, + "OPTION": true, + "OR": true, + "ORDER": true, + "ORGANIZATION": true, + "OSLABEL": true, + "OVERFLOW": true, + "OWN": true, + "PACKAGE": true, + "PARALLEL": true, + "PARTITION": true, + "PASSWORD": true, + "PASSWORD_GRACE_TIME": true, + "PASSWORD_LIFE_TIME": true, + "PASSWORD_LOCK_TIME": true, + "PASSWORD_REUSE_MAX": true, + "PASSWORD_REUSE_TIME": true, + 
"PASSWORD_VERIFY_FUNCTION": true, + "PCTFREE": true, + "PCTINCREASE": true, + "PCTTHRESHOLD": true, + "PCTUSED": true, + "PCTVERSION": true, + "PERCENT": true, + "PERMANENT": true, + "PLAN": true, + "PLSQL_DEBUG": true, + "POST_TRANSACTION": true, + "PRECISION": true, + "PRESERVE": true, + "PRIMARY": true, + "PRIOR": true, + "PRIVATE": true, + "PRIVATE_SGA": true, + "PRIVILEGE": true, + "PRIVILEGES": true, + "PROCEDURE": true, + "PROFILE": true, + "PUBLIC": true, + "PURGE": true, + "QUEUE": true, + "QUOTA": true, + "RANGE": true, + "RAW": true, + "RBA": true, + "READ": true, + "READUP": true, + "REAL": true, + "REBUILD": true, + "RECOVER": true, + "RECOVERABLE": true, + "RECOVERY": true, + "REF": true, + "REFERENCES": true, + "REFERENCING": true, + "REFRESH": true, + "RENAME": true, + "REPLACE": true, + "RESET": true, + "RESETLOGS": true, + "RESIZE": true, + "RESOURCE": true, + "RESTRICTED": true, + "RETURN": true, + "RETURNING": true, + "REUSE": true, + "REVERSE": true, + "REVOKE": true, + "ROLE": true, + "ROLES": true, + "ROLLBACK": true, + "ROW": true, + "ROWID": true, + "ROWNUM": true, + "ROWS": true, + "RULE": true, + "SAMPLE": true, + "SAVEPOINT": true, + "SB4": true, + "SCAN_INSTANCES": true, + "SCHEMA": true, + "SCN": true, + "SCOPE": true, + "SD_ALL": true, + "SD_INHIBIT": true, + "SD_SHOW": true, + "SEGMENT": true, + "SEG_BLOCK": true, + "SEG_FILE": true, + "SELECT": true, + "SEQUENCE": true, + "SERIALIZABLE": true, + "SESSION": true, + "SESSION_CACHED_CURSORS": true, + "SESSIONS_PER_USER": true, + "SET": true, + "SHARE": true, + "SHARED": true, + "SHARED_POOL": true, + "SHRINK": true, + "SIZE": true, + "SKIP": true, + "SKIP_UNUSABLE_INDEXES": true, + "SMALLINT": true, + "SNAPSHOT": true, + "SOME": true, + "SORT": true, + "SPECIFICATION": true, + "SPLIT": true, + "SQL_TRACE": true, + "STANDBY": true, + "START": true, + "STATEMENT_ID": true, + "STATISTICS": true, + "STOP": true, + "STORAGE": true, + "STORE": true, + "STRUCTURE": true, + "SUCCESSFUL": true, + "SWITCH": true, + "SYS_OP_ENFORCE_NOT_NULL$": true, + "SYS_OP_NTCIMG$": true, + "SYNONYM": true, + "SYSDATE": true, + "SYSDBA": true, + "SYSOPER": true, + "SYSTEM": true, + "TABLE": true, + "TABLES": true, + "TABLESPACE": true, + "TABLESPACE_NO": true, + "TABNO": true, + "TEMPORARY": true, + "THAN": true, + "THE": true, + "THEN": true, + "THREAD": true, + "TIMESTAMP": true, + "TIME": true, + "TO": true, + "TOPLEVEL": true, + "TRACE": true, + "TRACING": true, + "TRANSACTION": true, + "TRANSITIONAL": true, + "TRIGGER": true, + "TRIGGERS": true, + "TRUE": true, + "TRUNCATE": true, + "TX": true, + "TYPE": true, + "UB2": true, + "UBA": true, + "UID": true, + "UNARCHIVED": true, + "UNDO": true, + "UNION": true, + "UNIQUE": true, + "UNLIMITED": true, + "UNLOCK": true, + "UNRECOVERABLE": true, + "UNTIL": true, + "UNUSABLE": true, + "UNUSED": true, + "UPDATABLE": true, + "UPDATE": true, + "USAGE": true, + "USE": true, + "USER": true, + "USING": true, + "VALIDATE": true, + "VALIDATION": true, + "VALUE": true, + "VALUES": true, + "VARCHAR": true, + "VARCHAR2": true, + "VARYING": true, + "VIEW": true, + "WHEN": true, + "WHENEVER": true, + "WHERE": true, + "WITH": true, + "WITHOUT": true, + "WORK": true, + "WRITE": true, + "WRITEDOWN": true, + "WRITEUP": true, + "XID": true, + "YEAR": true, + "ZONE": true, + } + + oracleQuoter = schemas.Quoter{ + Prefix: '"', + Suffix: '"', + IsReserved: schemas.AlwaysReserve, + } +) + +type oracle struct { + Base +} + +func (db *oracle) Init(uri *URI) error { + db.quoter = oracleQuoter + return 
db.Base.Init(db, uri) +} + +func (db *oracle) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt, schemas.Bool, schemas.Serial, schemas.BigSerial: + res = "NUMBER" + case schemas.Binary, schemas.VarBinary, schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea: + return schemas.Blob + case schemas.Time, schemas.DateTime, schemas.TimeStamp: + res = schemas.TimeStamp + case schemas.TimeStampz: + res = "TIMESTAMP WITH TIME ZONE" + case schemas.Float, schemas.Double, schemas.Numeric, schemas.Decimal: + res = "NUMBER" + case schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: + res = "CLOB" + case schemas.Char, schemas.Varchar, schemas.TinyText: + res = "VARCHAR2" + default: + res = t + } + + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *oracle) AutoIncrStr() string { + return "AUTO_INCREMENT" +} + +func (db *oracle) IsReserved(name string) bool { + _, ok := oracleReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *oracle) DropTableSQL(tableName string) (string, bool) { + return fmt.Sprintf("DROP TABLE `%s`", tableName), false +} + +func (db *oracle) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql = "CREATE TABLE " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + " (" + + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + /*if col.IsPrimaryKey && len(pkList) == 1 { + sql += col.String(b.dialect) + } else {*/ + s, _ := ColumnString(db, col, false) + sql += s + // } + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 0 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + ")" + return []string{sql}, false +} + +func (db *oracle) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = oracleQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = oracleQuoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = oracleQuoter + } +} + +func (db *oracle) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{tableName, idxName} + return `SELECT INDEX_NAME FROM USER_INDEXES ` + + `WHERE TABLE_NAME = :1 AND INDEX_NAME = :2`, args +} + +func (db *oracle) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + return db.HasRecords(queryer, ctx, `SELECT table_name FROM user_tables WHERE table_name = :1`, tableName) +} + +func (db *oracle) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + args := []interface{}{tableName, colName} + query := "SELECT column_name FROM USER_TAB_COLUMNS WHERE table_name = :1" + + " AND column_name = :2" + return db.HasRecords(queryer, ctx, query, args...) 
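// NOTE (editor): illustrative sketch, not part of the vendored diff. The
// oracle dialect expects :N bind markers; for engine-generated SQL that uses
// '?', the Filters() result (below) installs a SeqFilter to convert them:
//
//	f := &SeqFilter{Prefix: ":", Start: 1}
//	_ = f.Do("SELECT 1 FROM user_tables WHERE table_name = ?")
//	// -> SELECT 1 FROM user_tables WHERE table_name = :1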
+} + +func (db *oracle) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{tableName} + s := "SELECT column_name,data_default,data_type,data_length,data_precision,data_scale," + + "nullable FROM USER_TAB_COLUMNS WHERE table_name = :1" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + for rows.Next() { + col := new(schemas.Column) + col.Indexes = make(map[string]int) + + var colName, colDefault, nullable, dataType, dataPrecision, dataScale *string + var dataLen int + + err = rows.Scan(&colName, &colDefault, &dataType, &dataLen, &dataPrecision, + &dataScale, &nullable) + if err != nil { + return nil, nil, err + } + + col.Name = strings.Trim(*colName, `" `) + if colDefault != nil { + col.Default = *colDefault + col.DefaultIsEmpty = false + } + + if *nullable == "Y" { + col.Nullable = true + } else { + col.Nullable = false + } + + var ignore bool + + var dt string + var len1, len2 int + dts := strings.Split(*dataType, "(") + dt = dts[0] + if len(dts) > 1 { + lens := strings.Split(dts[1][:len(dts[1])-1], ",") + if len(lens) > 1 { + len1, _ = strconv.Atoi(lens[0]) + len2, _ = strconv.Atoi(lens[1]) + } else { + len1, _ = strconv.Atoi(lens[0]) + } + } + + switch dt { + case "VARCHAR2": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: len1, DefaultLength2: len2} + case "NVARCHAR2": + col.SQLType = schemas.SQLType{Name: schemas.NVarchar, DefaultLength: len1, DefaultLength2: len2} + case "TIMESTAMP WITH TIME ZONE": + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + case "NUMBER": + col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: len1, DefaultLength2: len2} + case "LONG", "LONG RAW": + col.SQLType = schemas.SQLType{Name: schemas.Text, DefaultLength: 0, DefaultLength2: 0} + case "RAW": + col.SQLType = schemas.SQLType{Name: schemas.Binary, DefaultLength: 0, DefaultLength2: 0} + case "ROWID": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 18, DefaultLength2: 0} + case "AQ$_SUBSCRIBERS": + ignore = true + default: + col.SQLType = schemas.SQLType{Name: strings.ToUpper(dt), DefaultLength: len1, DefaultLength2: len2} + } + + if ignore { + continue + } + + if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { + return nil, nil, fmt.Errorf("Unknown colType %v %v", *dataType, col.SQLType) + } + + col.Length = dataLen + + if col.SQLType.IsText() || col.SQLType.IsTime() { + if !col.DefaultIsEmpty { + col.Default = "'" + col.Default + "'" + } + } + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + + return colSeq, cols, nil +} + +func (db *oracle) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := "SELECT table_name FROM user_tables" + + rows, err := queryer.QueryContext(ctx, s, args...) 
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	tables := make([]*schemas.Table, 0)
+	for rows.Next() {
+		table := schemas.NewEmptyTable()
+		err = rows.Scan(&table.Name)
+		if err != nil {
+			return nil, err
+		}
+
+		tables = append(tables, table)
+	}
+	return tables, nil
+}
+
+func (db *oracle) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) {
+	args := []interface{}{tableName}
+	s := "SELECT t.column_name,i.uniqueness,i.index_name FROM user_ind_columns t,user_indexes i " +
+		"WHERE t.index_name = i.index_name and t.table_name = i.table_name and t.table_name =:1"
+
+	rows, err := queryer.QueryContext(ctx, s, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	indexes := make(map[string]*schemas.Index, 0)
+	for rows.Next() {
+		var indexType int
+		var indexName, colName, uniqueness string
+
+		err = rows.Scan(&colName, &uniqueness, &indexName)
+		if err != nil {
+			return nil, err
+		}
+
+		indexName = strings.Trim(indexName, `" `)
+
+		var isRegular bool
+		if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) {
+			indexName = indexName[5+len(tableName):]
+			isRegular = true
+		}
+
+		if uniqueness == "UNIQUE" {
+			indexType = schemas.UniqueType
+		} else {
+			indexType = schemas.IndexType
+		}
+
+		var index *schemas.Index
+		var ok bool
+		if index, ok = indexes[indexName]; !ok {
+			index = new(schemas.Index)
+			index.Type = indexType
+			index.Name = indexName
+			index.IsRegular = isRegular
+			indexes[indexName] = index
+		}
+		index.AddColumn(colName)
+	}
+	return indexes, nil
+}
+
+func (db *oracle) Filters() []Filter {
+	return []Filter{
+		&SeqFilter{Prefix: ":", Start: 1},
+	}
+}
+
+type goracleDriver struct {
+}
+
+func (cfg *goracleDriver) Parse(driverName, dataSourceName string) (*URI, error) {
+	db := &URI{DBType: schemas.ORACLE}
+	dsnPattern := regexp.MustCompile(
+		`^(?:(?P<user>.*?)(?::(?P<passwd>.*))?@)?` + // [user[:password]@]
+			`(?:(?P<net>[^\(]*)(?:\((?P<addr>[^\)]*)\))?)?` + // [net[(addr)]]
+			`\/(?P<dbname>.*?)` + // /dbname
+			`(?:\?(?P<params>[^\?]*))?$`) // [?param1=value1&paramN=valueN]
+	matches := dsnPattern.FindStringSubmatch(dataSourceName)
+	// tlsConfigRegister := make(map[string]*tls.Config)
+	names := dsnPattern.SubexpNames()
+
+	for i, match := range matches {
+		switch names[i] {
+		case "dbname":
+			db.DBName = match
+		}
+	}
+	if db.DBName == "" {
+		return nil, errors.New("dbname is empty")
+	}
+	return db, nil
+}
+
+type oci8Driver struct {
+}
+
+// dataSourceName=user/password@ipv4:port/dbname
+// dataSourceName=user/password@[ipv6]:port/dbname
+func (p *oci8Driver) Parse(driverName, dataSourceName string) (*URI, error) {
+	db := &URI{DBType: schemas.ORACLE}
+	dsnPattern := regexp.MustCompile(
+		`^(?P<user>.*)\/(?P<password>.*)@` + // user:password@
+			`(?P<net>.*)` + // ip:port
+			`\/(?P<dbname>.*)`) // dbname
+	matches := dsnPattern.FindStringSubmatch(dataSourceName)
+	names := dsnPattern.SubexpNames()
+	for i, match := range matches {
+		switch names[i] {
+		case "dbname":
+			db.DBName = match
+		}
+	}
+	if db.DBName == "" && len(matches) != 0 {
+		return nil, errors.New("dbname is empty")
+	}
+	return db, nil
+}
diff --git a/vendor/xorm.io/xorm/dialects/pg_reserved.txt b/vendor/xorm.io/xorm/dialects/pg_reserved.txt
new file mode 100644
index 0000000000..720ed377ba
--- /dev/null
+++ b/vendor/xorm.io/xorm/dialects/pg_reserved.txt
@@ -0,0 +1,746 @@
+A non-reserved non-reserved
+ABORT non-reserved
+ABS reserved reserved
+ABSENT non-reserved non-reserved
+ABSOLUTE non-reserved non-reserved non-reserved reserved
+ACCESS
non-reserved +ACCORDING non-reserved non-reserved +ACTION non-reserved non-reserved non-reserved reserved +ADA non-reserved non-reserved non-reserved +ADD non-reserved non-reserved non-reserved reserved +ADMIN non-reserved non-reserved non-reserved +AFTER non-reserved non-reserved non-reserved +AGGREGATE non-reserved +ALL reserved reserved reserved reserved +ALLOCATE reserved reserved reserved +ALSO non-reserved +ALTER non-reserved reserved reserved reserved +ALWAYS non-reserved non-reserved non-reserved +ANALYSE reserved +ANALYZE reserved +AND reserved reserved reserved reserved +ANY reserved reserved reserved reserved +ARE reserved reserved reserved +ARRAY reserved reserved reserved +ARRAY_AGG reserved reserved +ARRAY_MAX_CARDINALITY reserved +AS reserved reserved reserved reserved +ASC reserved non-reserved non-reserved reserved +ASENSITIVE reserved reserved +ASSERTION non-reserved non-reserved non-reserved reserved +ASSIGNMENT non-reserved non-reserved non-reserved +ASYMMETRIC reserved reserved reserved +AT non-reserved reserved reserved reserved +ATOMIC reserved reserved +ATTRIBUTE non-reserved non-reserved non-reserved +ATTRIBUTES non-reserved non-reserved +AUTHORIZATION reserved (can be function or type) reserved reserved reserved +AVG reserved reserved reserved +BACKWARD non-reserved +BASE64 non-reserved non-reserved +BEFORE non-reserved non-reserved non-reserved +BEGIN non-reserved reserved reserved reserved +BEGIN_FRAME reserved +BEGIN_PARTITION reserved +BERNOULLI non-reserved non-reserved +BETWEEN non-reserved (cannot be function or type) reserved reserved reserved +BIGINT non-reserved (cannot be function or type) reserved reserved +BINARY reserved (can be function or type) reserved reserved +BIT non-reserved (cannot be function or type) reserved +BIT_LENGTH reserved +BLOB reserved reserved +BLOCKED non-reserved non-reserved +BOM non-reserved non-reserved +BOOLEAN non-reserved (cannot be function or type) reserved reserved +BOTH reserved reserved reserved reserved +BREADTH non-reserved non-reserved +BY non-reserved reserved reserved reserved +C non-reserved non-reserved non-reserved +CACHE non-reserved +CALL reserved reserved +CALLED non-reserved reserved reserved +CARDINALITY reserved reserved +CASCADE non-reserved non-reserved non-reserved reserved +CASCADED non-reserved reserved reserved reserved +CASE reserved reserved reserved reserved +CAST reserved reserved reserved reserved +CATALOG non-reserved non-reserved non-reserved reserved +CATALOG_NAME non-reserved non-reserved non-reserved +CEIL reserved reserved +CEILING reserved reserved +CHAIN non-reserved non-reserved non-reserved +CHAR non-reserved (cannot be function or type) reserved reserved reserved +CHARACTER non-reserved (cannot be function or type) reserved reserved reserved +CHARACTERISTICS non-reserved non-reserved non-reserved +CHARACTERS non-reserved non-reserved +CHARACTER_LENGTH reserved reserved reserved +CHARACTER_SET_CATALOG non-reserved non-reserved non-reserved +CHARACTER_SET_NAME non-reserved non-reserved non-reserved +CHARACTER_SET_SCHEMA non-reserved non-reserved non-reserved +CHAR_LENGTH reserved reserved reserved +CHECK reserved reserved reserved reserved +CHECKPOINT non-reserved +CLASS non-reserved +CLASS_ORIGIN non-reserved non-reserved non-reserved +CLOB reserved reserved +CLOSE non-reserved reserved reserved reserved +CLUSTER non-reserved +COALESCE non-reserved (cannot be function or type) reserved reserved reserved +COBOL non-reserved non-reserved non-reserved +COLLATE reserved reserved reserved 
reserved +COLLATION reserved (can be function or type) non-reserved non-reserved reserved +COLLATION_CATALOG non-reserved non-reserved non-reserved +COLLATION_NAME non-reserved non-reserved non-reserved +COLLATION_SCHEMA non-reserved non-reserved non-reserved +COLLECT reserved reserved +COLUMN reserved reserved reserved reserved +COLUMNS non-reserved non-reserved +COLUMN_NAME non-reserved non-reserved non-reserved +COMMAND_FUNCTION non-reserved non-reserved non-reserved +COMMAND_FUNCTION_CODE non-reserved non-reserved +COMMENT non-reserved +COMMENTS non-reserved +COMMIT non-reserved reserved reserved reserved +COMMITTED non-reserved non-reserved non-reserved non-reserved +CONCURRENTLY reserved (can be function or type) +CONDITION reserved reserved +CONDITION_NUMBER non-reserved non-reserved non-reserved +CONFIGURATION non-reserved +CONNECT reserved reserved reserved +CONNECTION non-reserved non-reserved non-reserved reserved +CONNECTION_NAME non-reserved non-reserved non-reserved +CONSTRAINT reserved reserved reserved reserved +CONSTRAINTS non-reserved non-reserved non-reserved reserved +CONSTRAINT_CATALOG non-reserved non-reserved non-reserved +CONSTRAINT_NAME non-reserved non-reserved non-reserved +CONSTRAINT_SCHEMA non-reserved non-reserved non-reserved +CONSTRUCTOR non-reserved non-reserved +CONTAINS reserved non-reserved +CONTENT non-reserved non-reserved non-reserved +CONTINUE non-reserved non-reserved non-reserved reserved +CONTROL non-reserved non-reserved +CONVERSION non-reserved +CONVERT reserved reserved reserved +COPY non-reserved +CORR reserved reserved +CORRESPONDING reserved reserved reserved +COST non-reserved +COUNT reserved reserved reserved +COVAR_POP reserved reserved +COVAR_SAMP reserved reserved +CREATE reserved reserved reserved reserved +CROSS reserved (can be function or type) reserved reserved reserved +CSV non-reserved +CUBE reserved reserved +CUME_DIST reserved reserved +CURRENT non-reserved reserved reserved reserved +CURRENT_CATALOG reserved reserved reserved +CURRENT_DATE reserved reserved reserved reserved +CURRENT_DEFAULT_TRANSFORM_GROUP reserved reserved +CURRENT_PATH reserved reserved +CURRENT_ROLE reserved reserved reserved +CURRENT_ROW reserved +CURRENT_SCHEMA reserved (can be function or type) reserved reserved +CURRENT_TIME reserved reserved reserved reserved +CURRENT_TIMESTAMP reserved reserved reserved reserved +CURRENT_TRANSFORM_GROUP_FOR_TYPE reserved reserved +CURRENT_USER reserved reserved reserved reserved +CURSOR non-reserved reserved reserved reserved +CURSOR_NAME non-reserved non-reserved non-reserved +CYCLE non-reserved reserved reserved +DATA non-reserved non-reserved non-reserved non-reserved +DATABASE non-reserved +DATALINK reserved reserved +DATE reserved reserved reserved +DATETIME_INTERVAL_CODE non-reserved non-reserved non-reserved +DATETIME_INTERVAL_PRECISION non-reserved non-reserved non-reserved +DAY non-reserved reserved reserved reserved +DB non-reserved non-reserved +DEALLOCATE non-reserved reserved reserved reserved +DEC non-reserved (cannot be function or type) reserved reserved reserved +DECIMAL non-reserved (cannot be function or type) reserved reserved reserved +DECLARE non-reserved reserved reserved reserved +DEFAULT reserved reserved reserved reserved +DEFAULTS non-reserved non-reserved non-reserved +DEFERRABLE reserved non-reserved non-reserved reserved +DEFERRED non-reserved non-reserved non-reserved reserved +DEFINED non-reserved non-reserved +DEFINER non-reserved non-reserved non-reserved +DEGREE non-reserved 
non-reserved +DELETE non-reserved reserved reserved reserved +DELIMITER non-reserved +DELIMITERS non-reserved +DENSE_RANK reserved reserved +DEPTH non-reserved non-reserved +DEREF reserved reserved +DERIVED non-reserved non-reserved +DESC reserved non-reserved non-reserved reserved +DESCRIBE reserved reserved reserved +DESCRIPTOR non-reserved non-reserved reserved +DETERMINISTIC reserved reserved +DIAGNOSTICS non-reserved non-reserved reserved +DICTIONARY non-reserved +DISABLE non-reserved +DISCARD non-reserved +DISCONNECT reserved reserved reserved +DISPATCH non-reserved non-reserved +DISTINCT reserved reserved reserved reserved +DLNEWCOPY reserved reserved +DLPREVIOUSCOPY reserved reserved +DLURLCOMPLETE reserved reserved +DLURLCOMPLETEONLY reserved reserved +DLURLCOMPLETEWRITE reserved reserved +DLURLPATH reserved reserved +DLURLPATHONLY reserved reserved +DLURLPATHWRITE reserved reserved +DLURLSCHEME reserved reserved +DLURLSERVER reserved reserved +DLVALUE reserved reserved +DO reserved +DOCUMENT non-reserved non-reserved non-reserved +DOMAIN non-reserved non-reserved non-reserved reserved +DOUBLE non-reserved reserved reserved reserved +DROP non-reserved reserved reserved reserved +DYNAMIC reserved reserved +DYNAMIC_FUNCTION non-reserved non-reserved non-reserved +DYNAMIC_FUNCTION_CODE non-reserved non-reserved +EACH non-reserved reserved reserved +ELEMENT reserved reserved +ELSE reserved reserved reserved reserved +EMPTY non-reserved non-reserved +ENABLE non-reserved +ENCODING non-reserved non-reserved non-reserved +ENCRYPTED non-reserved +END reserved reserved reserved reserved +END-EXEC reserved reserved reserved +END_FRAME reserved +END_PARTITION reserved +ENFORCED non-reserved +ENUM non-reserved +EQUALS reserved non-reserved +ESCAPE non-reserved reserved reserved reserved +EVENT non-reserved +EVERY reserved reserved +EXCEPT reserved reserved reserved reserved +EXCEPTION reserved +EXCLUDE non-reserved non-reserved non-reserved +EXCLUDING non-reserved non-reserved non-reserved +EXCLUSIVE non-reserved +EXEC reserved reserved reserved +EXECUTE non-reserved reserved reserved reserved +EXISTS non-reserved (cannot be function or type) reserved reserved reserved +EXP reserved reserved +EXPLAIN non-reserved +EXPRESSION non-reserved +EXTENSION non-reserved +EXTERNAL non-reserved reserved reserved reserved +EXTRACT non-reserved (cannot be function or type) reserved reserved reserved +FALSE reserved reserved reserved reserved +FAMILY non-reserved +FETCH reserved reserved reserved reserved +FILE non-reserved non-reserved +FILTER reserved reserved +FINAL non-reserved non-reserved +FIRST non-reserved non-reserved non-reserved reserved +FIRST_VALUE reserved reserved +FLAG non-reserved non-reserved +FLOAT non-reserved (cannot be function or type) reserved reserved reserved +FLOOR reserved reserved +FOLLOWING non-reserved non-reserved non-reserved +FOR reserved reserved reserved reserved +FORCE non-reserved +FOREIGN reserved reserved reserved reserved +FORTRAN non-reserved non-reserved non-reserved +FORWARD non-reserved +FOUND non-reserved non-reserved reserved +FRAME_ROW reserved +FREE reserved reserved +FREEZE reserved (can be function or type) +FROM reserved reserved reserved reserved +FS non-reserved non-reserved +FULL reserved (can be function or type) reserved reserved reserved +FUNCTION non-reserved reserved reserved +FUNCTIONS non-reserved +FUSION reserved reserved +G non-reserved non-reserved +GENERAL non-reserved non-reserved +GENERATED non-reserved non-reserved +GET reserved reserved 
reserved +GLOBAL non-reserved reserved reserved reserved +GO non-reserved non-reserved reserved +GOTO non-reserved non-reserved reserved +GRANT reserved reserved reserved reserved +GRANTED non-reserved non-reserved non-reserved +GREATEST non-reserved (cannot be function or type) +GROUP reserved reserved reserved reserved +GROUPING reserved reserved +GROUPS reserved +HANDLER non-reserved +HAVING reserved reserved reserved reserved +HEADER non-reserved +HEX non-reserved non-reserved +HIERARCHY non-reserved non-reserved +HOLD non-reserved reserved reserved +HOUR non-reserved reserved reserved reserved +ID non-reserved non-reserved +IDENTITY non-reserved reserved reserved reserved +IF non-reserved +IGNORE non-reserved non-reserved +ILIKE reserved (can be function or type) +IMMEDIATE non-reserved non-reserved non-reserved reserved +IMMEDIATELY non-reserved +IMMUTABLE non-reserved +IMPLEMENTATION non-reserved non-reserved +IMPLICIT non-reserved +IMPORT reserved reserved +IN reserved reserved reserved reserved +INCLUDING non-reserved non-reserved non-reserved +INCREMENT non-reserved non-reserved non-reserved +INDENT non-reserved non-reserved +INDEX non-reserved +INDEXES non-reserved +INDICATOR reserved reserved reserved +INHERIT non-reserved +INHERITS non-reserved +INITIALLY reserved non-reserved non-reserved reserved +INLINE non-reserved +INNER reserved (can be function or type) reserved reserved reserved +INOUT non-reserved (cannot be function or type) reserved reserved +INPUT non-reserved non-reserved non-reserved reserved +INSENSITIVE non-reserved reserved reserved reserved +INSERT non-reserved reserved reserved reserved +INSTANCE non-reserved non-reserved +INSTANTIABLE non-reserved non-reserved +INSTEAD non-reserved non-reserved non-reserved +INT non-reserved (cannot be function or type) reserved reserved reserved +INTEGER non-reserved (cannot be function or type) reserved reserved reserved +INTEGRITY non-reserved non-reserved +INTERSECT reserved reserved reserved reserved +INTERSECTION reserved reserved +INTERVAL non-reserved (cannot be function or type) reserved reserved reserved +INTO reserved reserved reserved reserved +INVOKER non-reserved non-reserved non-reserved +IS reserved (can be function or type) reserved reserved reserved +ISNULL reserved (can be function or type) +ISOLATION non-reserved non-reserved non-reserved reserved +JOIN reserved (can be function or type) reserved reserved reserved +K non-reserved non-reserved +KEY non-reserved non-reserved non-reserved reserved +KEY_MEMBER non-reserved non-reserved +KEY_TYPE non-reserved non-reserved +LABEL non-reserved +LAG reserved reserved +LANGUAGE non-reserved reserved reserved reserved +LARGE non-reserved reserved reserved +LAST non-reserved non-reserved non-reserved reserved +LAST_VALUE reserved reserved +LATERAL reserved reserved reserved +LC_COLLATE non-reserved +LC_CTYPE non-reserved +LEAD reserved reserved +LEADING reserved reserved reserved reserved +LEAKPROOF non-reserved +LEAST non-reserved (cannot be function or type) +LEFT reserved (can be function or type) reserved reserved reserved +LENGTH non-reserved non-reserved non-reserved +LEVEL non-reserved non-reserved non-reserved reserved +LIBRARY non-reserved non-reserved +LIKE reserved (can be function or type) reserved reserved reserved +LIKE_REGEX reserved reserved +LIMIT reserved non-reserved non-reserved +LINK non-reserved non-reserved +LISTEN non-reserved +LN reserved reserved +LOAD non-reserved +LOCAL non-reserved reserved reserved reserved +LOCALTIME reserved reserved 
reserved +LOCALTIMESTAMP reserved reserved reserved +LOCATION non-reserved non-reserved non-reserved +LOCATOR non-reserved non-reserved +LOCK non-reserved +LOWER reserved reserved reserved +M non-reserved non-reserved +MAP non-reserved non-reserved +MAPPING non-reserved non-reserved non-reserved +MATCH non-reserved reserved reserved reserved +MATCHED non-reserved non-reserved +MATERIALIZED non-reserved +MAX reserved reserved reserved +MAXVALUE non-reserved non-reserved non-reserved +MAX_CARDINALITY reserved +MEMBER reserved reserved +MERGE reserved reserved +MESSAGE_LENGTH non-reserved non-reserved non-reserved +MESSAGE_OCTET_LENGTH non-reserved non-reserved non-reserved +MESSAGE_TEXT non-reserved non-reserved non-reserved +METHOD reserved reserved +MIN reserved reserved reserved +MINUTE non-reserved reserved reserved reserved +MINVALUE non-reserved non-reserved non-reserved +MOD reserved reserved +MODE non-reserved +MODIFIES reserved reserved +MODULE reserved reserved reserved +MONTH non-reserved reserved reserved reserved +MORE non-reserved non-reserved non-reserved +MOVE non-reserved +MULTISET reserved reserved +MUMPS non-reserved non-reserved non-reserved +NAME non-reserved non-reserved non-reserved non-reserved +NAMES non-reserved non-reserved non-reserved reserved +NAMESPACE non-reserved non-reserved +NATIONAL non-reserved (cannot be function or type) reserved reserved reserved +NATURAL reserved (can be function or type) reserved reserved reserved +NCHAR non-reserved (cannot be function or type) reserved reserved reserved +NCLOB reserved reserved +NESTING non-reserved non-reserved +NEW reserved reserved +NEXT non-reserved non-reserved non-reserved reserved +NFC non-reserved non-reserved +NFD non-reserved non-reserved +NFKC non-reserved non-reserved +NFKD non-reserved non-reserved +NIL non-reserved non-reserved +NO non-reserved reserved reserved reserved +NONE non-reserved (cannot be function or type) reserved reserved +NORMALIZE reserved reserved +NORMALIZED non-reserved non-reserved +NOT reserved reserved reserved reserved +NOTHING non-reserved +NOTIFY non-reserved +NOTNULL reserved (can be function or type) +NOWAIT non-reserved +NTH_VALUE reserved reserved +NTILE reserved reserved +NULL reserved reserved reserved reserved +NULLABLE non-reserved non-reserved non-reserved +NULLIF non-reserved (cannot be function or type) reserved reserved reserved +NULLS non-reserved non-reserved non-reserved +NUMBER non-reserved non-reserved non-reserved +NUMERIC non-reserved (cannot be function or type) reserved reserved reserved +OBJECT non-reserved non-reserved non-reserved +OCCURRENCES_REGEX reserved reserved +OCTETS non-reserved non-reserved +OCTET_LENGTH reserved reserved reserved +OF non-reserved reserved reserved reserved +OFF non-reserved non-reserved non-reserved +OFFSET reserved reserved reserved +OIDS non-reserved +OLD reserved reserved +ON reserved reserved reserved reserved +ONLY reserved reserved reserved reserved +OPEN reserved reserved reserved +OPERATOR non-reserved +OPTION non-reserved non-reserved non-reserved reserved +OPTIONS non-reserved non-reserved non-reserved +OR reserved reserved reserved reserved +ORDER reserved reserved reserved reserved +ORDERING non-reserved non-reserved +ORDINALITY non-reserved non-reserved +OTHERS non-reserved non-reserved +OUT non-reserved (cannot be function or type) reserved reserved +OUTER reserved (can be function or type) reserved reserved reserved +OUTPUT non-reserved non-reserved reserved +OVER reserved (can be function or type) reserved 
reserved +OVERLAPS reserved (can be function or type) reserved reserved reserved +OVERLAY non-reserved (cannot be function or type) reserved reserved +OVERRIDING non-reserved non-reserved +OWNED non-reserved +OWNER non-reserved +P non-reserved non-reserved +PAD non-reserved non-reserved reserved +PARAMETER reserved reserved +PARAMETER_MODE non-reserved non-reserved +PARAMETER_NAME non-reserved non-reserved +PARAMETER_ORDINAL_POSITION non-reserved non-reserved +PARAMETER_SPECIFIC_CATALOG non-reserved non-reserved +PARAMETER_SPECIFIC_NAME non-reserved non-reserved +PARAMETER_SPECIFIC_SCHEMA non-reserved non-reserved +PARSER non-reserved +PARTIAL non-reserved non-reserved non-reserved reserved +PARTITION non-reserved reserved reserved +PASCAL non-reserved non-reserved non-reserved +PASSING non-reserved non-reserved non-reserved +PASSTHROUGH non-reserved non-reserved +PASSWORD non-reserved +PATH non-reserved non-reserved +PERCENT reserved +PERCENTILE_CONT reserved reserved +PERCENTILE_DISC reserved reserved +PERCENT_RANK reserved reserved +PERIOD reserved +PERMISSION non-reserved non-reserved +PLACING reserved non-reserved non-reserved +PLANS non-reserved +PLI non-reserved non-reserved non-reserved +PORTION reserved +POSITION non-reserved (cannot be function or type) reserved reserved reserved +POSITION_REGEX reserved reserved +POWER reserved reserved +PRECEDES reserved +PRECEDING non-reserved non-reserved non-reserved +PRECISION non-reserved (cannot be function or type) reserved reserved reserved +PREPARE non-reserved reserved reserved reserved +PREPARED non-reserved +PRESERVE non-reserved non-reserved non-reserved reserved +PRIMARY reserved reserved reserved reserved +PRIOR non-reserved non-reserved non-reserved reserved +PRIVILEGES non-reserved non-reserved non-reserved reserved +PROCEDURAL non-reserved +PROCEDURE non-reserved reserved reserved reserved +PROGRAM non-reserved +PUBLIC non-reserved non-reserved reserved +QUOTE non-reserved +RANGE non-reserved reserved reserved +RANK reserved reserved +READ non-reserved non-reserved non-reserved reserved +READS reserved reserved +REAL non-reserved (cannot be function or type) reserved reserved reserved +REASSIGN non-reserved +RECHECK non-reserved +RECOVERY non-reserved non-reserved +RECURSIVE non-reserved reserved reserved +REF non-reserved reserved reserved +REFERENCES reserved reserved reserved reserved +REFERENCING reserved reserved +REFRESH non-reserved +REGR_AVGX reserved reserved +REGR_AVGY reserved reserved +REGR_COUNT reserved reserved +REGR_INTERCEPT reserved reserved +REGR_R2 reserved reserved +REGR_SLOPE reserved reserved +REGR_SXX reserved reserved +REGR_SXY reserved reserved +REGR_SYY reserved reserved +REINDEX non-reserved +RELATIVE non-reserved non-reserved non-reserved reserved +RELEASE non-reserved reserved reserved +RENAME non-reserved +REPEATABLE non-reserved non-reserved non-reserved non-reserved +REPLACE non-reserved +REPLICA non-reserved +REQUIRING non-reserved non-reserved +RESET non-reserved +RESPECT non-reserved non-reserved +RESTART non-reserved non-reserved non-reserved +RESTORE non-reserved non-reserved +RESTRICT non-reserved non-reserved non-reserved reserved +RESULT reserved reserved +RETURN reserved reserved +RETURNED_CARDINALITY non-reserved non-reserved +RETURNED_LENGTH non-reserved non-reserved non-reserved +RETURNED_OCTET_LENGTH non-reserved non-reserved non-reserved +RETURNED_SQLSTATE non-reserved non-reserved non-reserved +RETURNING reserved non-reserved non-reserved +RETURNS non-reserved reserved reserved 
+REVOKE non-reserved reserved reserved reserved +RIGHT reserved (can be function or type) reserved reserved reserved +ROLE non-reserved non-reserved non-reserved +ROLLBACK non-reserved reserved reserved reserved +ROLLUP reserved reserved +ROUTINE non-reserved non-reserved +ROUTINE_CATALOG non-reserved non-reserved +ROUTINE_NAME non-reserved non-reserved +ROUTINE_SCHEMA non-reserved non-reserved +ROW non-reserved (cannot be function or type) reserved reserved +ROWS non-reserved reserved reserved reserved +ROW_COUNT non-reserved non-reserved non-reserved +ROW_NUMBER reserved reserved +RULE non-reserved +SAVEPOINT non-reserved reserved reserved +SCALE non-reserved non-reserved non-reserved +SCHEMA non-reserved non-reserved non-reserved reserved +SCHEMA_NAME non-reserved non-reserved non-reserved +SCOPE reserved reserved +SCOPE_CATALOG non-reserved non-reserved +SCOPE_NAME non-reserved non-reserved +SCOPE_SCHEMA non-reserved non-reserved +SCROLL non-reserved reserved reserved reserved +SEARCH non-reserved reserved reserved +SECOND non-reserved reserved reserved reserved +SECTION non-reserved non-reserved reserved +SECURITY non-reserved non-reserved non-reserved +SELECT reserved reserved reserved reserved +SELECTIVE non-reserved non-reserved +SELF non-reserved non-reserved +SENSITIVE reserved reserved +SEQUENCE non-reserved non-reserved non-reserved +SEQUENCES non-reserved +SERIALIZABLE non-reserved non-reserved non-reserved non-reserved +SERVER non-reserved non-reserved non-reserved +SERVER_NAME non-reserved non-reserved non-reserved +SESSION non-reserved non-reserved non-reserved reserved +SESSION_USER reserved reserved reserved reserved +SET non-reserved reserved reserved reserved +SETOF non-reserved (cannot be function or type) +SETS non-reserved non-reserved +SHARE non-reserved +SHOW non-reserved +SIMILAR reserved (can be function or type) reserved reserved +SIMPLE non-reserved non-reserved non-reserved +SIZE non-reserved non-reserved reserved +SMALLINT non-reserved (cannot be function or type) reserved reserved reserved +SNAPSHOT non-reserved +SOME reserved reserved reserved reserved +SOURCE non-reserved non-reserved +SPACE non-reserved non-reserved reserved +SPECIFIC reserved reserved +SPECIFICTYPE reserved reserved +SPECIFIC_NAME non-reserved non-reserved +SQL reserved reserved reserved +SQLCODE reserved +SQLERROR reserved +SQLEXCEPTION reserved reserved +SQLSTATE reserved reserved reserved +SQLWARNING reserved reserved +SQRT reserved reserved +STABLE non-reserved +STANDALONE non-reserved non-reserved non-reserved +START non-reserved reserved reserved +STATE non-reserved non-reserved +STATEMENT non-reserved non-reserved non-reserved +STATIC reserved reserved +STATISTICS non-reserved +STDDEV_POP reserved reserved +STDDEV_SAMP reserved reserved +STDIN non-reserved +STDOUT non-reserved +STORAGE non-reserved +STRICT non-reserved +STRIP non-reserved non-reserved non-reserved +STRUCTURE non-reserved non-reserved +STYLE non-reserved non-reserved +SUBCLASS_ORIGIN non-reserved non-reserved non-reserved +SUBMULTISET reserved reserved +SUBSTRING non-reserved (cannot be function or type) reserved reserved reserved +SUBSTRING_REGEX reserved reserved +SUCCEEDS reserved +SUM reserved reserved reserved +SYMMETRIC reserved reserved reserved +SYSID non-reserved +SYSTEM non-reserved reserved reserved +SYSTEM_TIME reserved +SYSTEM_USER reserved reserved reserved +T non-reserved non-reserved +TABLE reserved reserved reserved reserved +TABLES non-reserved +TABLESAMPLE reserved reserved +TABLESPACE 
non-reserved +TABLE_NAME non-reserved non-reserved non-reserved +TEMP non-reserved +TEMPLATE non-reserved +TEMPORARY non-reserved non-reserved non-reserved reserved +TEXT non-reserved +THEN reserved reserved reserved reserved +TIES non-reserved non-reserved +TIME non-reserved (cannot be function or type) reserved reserved reserved +TIMESTAMP non-reserved (cannot be function or type) reserved reserved reserved +TIMEZONE_HOUR reserved reserved reserved +TIMEZONE_MINUTE reserved reserved reserved +TO reserved reserved reserved reserved +TOKEN non-reserved non-reserved +TOP_LEVEL_COUNT non-reserved non-reserved +TRAILING reserved reserved reserved reserved +TRANSACTION non-reserved non-reserved non-reserved reserved +TRANSACTIONS_COMMITTED non-reserved non-reserved +TRANSACTIONS_ROLLED_BACK non-reserved non-reserved +TRANSACTION_ACTIVE non-reserved non-reserved +TRANSFORM non-reserved non-reserved +TRANSFORMS non-reserved non-reserved +TRANSLATE reserved reserved reserved +TRANSLATE_REGEX reserved reserved +TRANSLATION reserved reserved reserved +TREAT non-reserved (cannot be function or type) reserved reserved +TRIGGER non-reserved reserved reserved +TRIGGER_CATALOG non-reserved non-reserved +TRIGGER_NAME non-reserved non-reserved +TRIGGER_SCHEMA non-reserved non-reserved +TRIM non-reserved (cannot be function or type) reserved reserved reserved +TRIM_ARRAY reserved reserved +TRUE reserved reserved reserved reserved +TRUNCATE non-reserved reserved reserved +TRUSTED non-reserved +TYPE non-reserved non-reserved non-reserved non-reserved +TYPES non-reserved +UESCAPE reserved reserved +UNBOUNDED non-reserved non-reserved non-reserved +UNCOMMITTED non-reserved non-reserved non-reserved non-reserved +UNDER non-reserved non-reserved +UNENCRYPTED non-reserved +UNION reserved reserved reserved reserved +UNIQUE reserved reserved reserved reserved +UNKNOWN non-reserved reserved reserved reserved +UNLINK non-reserved non-reserved +UNLISTEN non-reserved +UNLOGGED non-reserved +UNNAMED non-reserved non-reserved non-reserved +UNNEST reserved reserved +UNTIL non-reserved +UNTYPED non-reserved non-reserved +UPDATE non-reserved reserved reserved reserved +UPPER reserved reserved reserved +URI non-reserved non-reserved +USAGE non-reserved non-reserved reserved +USER reserved reserved reserved reserved +USER_DEFINED_TYPE_CATALOG non-reserved non-reserved +USER_DEFINED_TYPE_CODE non-reserved non-reserved +USER_DEFINED_TYPE_NAME non-reserved non-reserved +USER_DEFINED_TYPE_SCHEMA non-reserved non-reserved +USING reserved reserved reserved reserved +VACUUM non-reserved +VALID non-reserved non-reserved non-reserved +VALIDATE non-reserved +VALIDATOR non-reserved +VALUE non-reserved reserved reserved reserved +VALUES non-reserved (cannot be function or type) reserved reserved reserved +VALUE_OF reserved +VARBINARY reserved reserved +VARCHAR non-reserved (cannot be function or type) reserved reserved reserved +VARIADIC reserved +VARYING non-reserved reserved reserved reserved +VAR_POP reserved reserved +VAR_SAMP reserved reserved +VERBOSE reserved (can be function or type) +VERSION non-reserved non-reserved non-reserved +VERSIONING reserved +VIEW non-reserved non-reserved non-reserved reserved +VOLATILE non-reserved +WHEN reserved reserved reserved reserved +WHENEVER reserved reserved reserved +WHERE reserved reserved reserved reserved +WHITESPACE non-reserved non-reserved non-reserved +WIDTH_BUCKET reserved reserved +WINDOW reserved reserved reserved +WITH reserved reserved reserved reserved +WITHIN reserved reserved 
+WITHOUT non-reserved reserved reserved +WORK non-reserved non-reserved non-reserved reserved +WRAPPER non-reserved non-reserved non-reserved +WRITE non-reserved non-reserved non-reserved reserved +XML non-reserved reserved reserved +XMLAGG reserved reserved +XMLATTRIBUTES non-reserved (cannot be function or type) reserved reserved +XMLBINARY reserved reserved +XMLCAST reserved reserved +XMLCOMMENT reserved reserved +XMLCONCAT non-reserved (cannot be function or type) reserved reserved +XMLDECLARATION non-reserved non-reserved +XMLDOCUMENT reserved reserved +XMLELEMENT non-reserved (cannot be function or type) reserved reserved +XMLEXISTS non-reserved (cannot be function or type) reserved reserved +XMLFOREST non-reserved (cannot be function or type) reserved reserved +XMLITERATE reserved reserved +XMLNAMESPACES reserved reserved +XMLPARSE non-reserved (cannot be function or type) reserved reserved +XMLPI non-reserved (cannot be function or type) reserved reserved +XMLQUERY reserved reserved +XMLROOT non-reserved (cannot be function or type) +XMLSCHEMA non-reserved non-reserved +XMLSERIALIZE non-reserved (cannot be function or type) reserved reserved +XMLTABLE reserved reserved +XMLTEXT reserved reserved +XMLVALIDATE reserved reserved +YEAR non-reserved reserved reserved reserved +YES non-reserved non-reserved non-reserved +ZONE non-reserved non-reserved non-reserved reserved \ No newline at end of file diff --git a/vendor/xorm.io/xorm/dialects/postgres.go b/vendor/xorm.io/xorm/dialects/postgres.go new file mode 100644 index 0000000000..1996c49dea --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/postgres.go @@ -0,0 +1,1349 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dialects + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html +var ( + postgresReservedWords = map[string]bool{ + "A": true, + "ABORT": true, + "ABS": true, + "ABSENT": true, + "ABSOLUTE": true, + "ACCESS": true, + "ACCORDING": true, + "ACTION": true, + "ADA": true, + "ADD": true, + "ADMIN": true, + "AFTER": true, + "AGGREGATE": true, + "ALL": true, + "ALLOCATE": true, + "ALSO": true, + "ALTER": true, + "ALWAYS": true, + "ANALYSE": true, + "ANALYZE": true, + "AND": true, + "ANY": true, + "ARE": true, + "ARRAY": true, + "ARRAY_AGG": true, + "ARRAY_MAX_CARDINALITY": true, + "AS": true, + "ASC": true, + "ASENSITIVE": true, + "ASSERTION": true, + "ASSIGNMENT": true, + "ASYMMETRIC": true, + "AT": true, + "ATOMIC": true, + "ATTRIBUTE": true, + "ATTRIBUTES": true, + "AUTHORIZATION": true, + "AVG": true, + "BACKWARD": true, + "BASE64": true, + "BEFORE": true, + "BEGIN": true, + "BEGIN_FRAME": true, + "BEGIN_PARTITION": true, + "BERNOULLI": true, + "BETWEEN": true, + "BIGINT": true, + "BINARY": true, + "BIT": true, + "BIT_LENGTH": true, + "BLOB": true, + "BLOCKED": true, + "BOM": true, + "BOOLEAN": true, + "BOTH": true, + "BREADTH": true, + "BY": true, + "C": true, + "CACHE": true, + "CALL": true, + "CALLED": true, + "CARDINALITY": true, + "CASCADE": true, + "CASCADED": true, + "CASE": true, + "CAST": true, + "CATALOG": true, + "CATALOG_NAME": true, + "CEIL": true, + "CEILING": true, + "CHAIN": true, + "CHAR": true, + "CHARACTER": true, + "CHARACTERISTICS": true, + "CHARACTERS": true, + "CHARACTER_LENGTH": true, + "CHARACTER_SET_CATALOG": true, + "CHARACTER_SET_NAME": true, + "CHARACTER_SET_SCHEMA": true, + "CHAR_LENGTH": true, + "CHECK": true, + "CHECKPOINT": true, + "CLASS": true, + "CLASS_ORIGIN": true, + "CLOB": true, + "CLOSE": true, + "CLUSTER": true, + "COALESCE": true, + "COBOL": true, + "COLLATE": true, + "COLLATION": true, + "COLLATION_CATALOG": true, + "COLLATION_NAME": true, + "COLLATION_SCHEMA": true, + "COLLECT": true, + "COLUMN": true, + "COLUMNS": true, + "COLUMN_NAME": true, + "COMMAND_FUNCTION": true, + "COMMAND_FUNCTION_CODE": true, + "COMMENT": true, + "COMMENTS": true, + "COMMIT": true, + "COMMITTED": true, + "CONCURRENTLY": true, + "CONDITION": true, + "CONDITION_NUMBER": true, + "CONFIGURATION": true, + "CONNECT": true, + "CONNECTION": true, + "CONNECTION_NAME": true, + "CONSTRAINT": true, + "CONSTRAINTS": true, + "CONSTRAINT_CATALOG": true, + "CONSTRAINT_NAME": true, + "CONSTRAINT_SCHEMA": true, + "CONSTRUCTOR": true, + "CONTAINS": true, + "CONTENT": true, + "CONTINUE": true, + "CONTROL": true, + "CONVERSION": true, + "CONVERT": true, + "COPY": true, + "CORR": true, + "CORRESPONDING": true, + "COST": true, + "COUNT": true, + "COVAR_POP": true, + "COVAR_SAMP": true, + "CREATE": true, + "CROSS": true, + "CSV": true, + "CUBE": true, + "CUME_DIST": true, + "CURRENT": true, + "CURRENT_CATALOG": true, + "CURRENT_DATE": true, + "CURRENT_DEFAULT_TRANSFORM_GROUP": true, + "CURRENT_PATH": true, + "CURRENT_ROLE": true, + "CURRENT_ROW": true, + "CURRENT_SCHEMA": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "CURRENT_TRANSFORM_GROUP_FOR_TYPE": true, + "CURRENT_USER": true, + "CURSOR": true, + "CURSOR_NAME": true, + "CYCLE": true, + "DATA": true, + "DATABASE": true, + "DATALINK": true, + "DATE": true, + "DATETIME_INTERVAL_CODE": true, + "DATETIME_INTERVAL_PRECISION": true, + "DAY": true, + "DB": true, + 
"DEALLOCATE": true, + "DEC": true, + "DECIMAL": true, + "DECLARE": true, + "DEFAULT": true, + "DEFAULTS": true, + "DEFERRABLE": true, + "DEFERRED": true, + "DEFINED": true, + "DEFINER": true, + "DEGREE": true, + "DELETE": true, + "DELIMITER": true, + "DELIMITERS": true, + "DENSE_RANK": true, + "DEPTH": true, + "DEREF": true, + "DERIVED": true, + "DESC": true, + "DESCRIBE": true, + "DESCRIPTOR": true, + "DETERMINISTIC": true, + "DIAGNOSTICS": true, + "DICTIONARY": true, + "DISABLE": true, + "DISCARD": true, + "DISCONNECT": true, + "DISPATCH": true, + "DISTINCT": true, + "DLNEWCOPY": true, + "DLPREVIOUSCOPY": true, + "DLURLCOMPLETE": true, + "DLURLCOMPLETEONLY": true, + "DLURLCOMPLETEWRITE": true, + "DLURLPATH": true, + "DLURLPATHONLY": true, + "DLURLPATHWRITE": true, + "DLURLSCHEME": true, + "DLURLSERVER": true, + "DLVALUE": true, + "DO": true, + "DOCUMENT": true, + "DOMAIN": true, + "DOUBLE": true, + "DROP": true, + "DYNAMIC": true, + "DYNAMIC_FUNCTION": true, + "DYNAMIC_FUNCTION_CODE": true, + "EACH": true, + "ELEMENT": true, + "ELSE": true, + "EMPTY": true, + "ENABLE": true, + "ENCODING": true, + "ENCRYPTED": true, + "END": true, + "END-EXEC": true, + "END_FRAME": true, + "END_PARTITION": true, + "ENFORCED": true, + "ENUM": true, + "EQUALS": true, + "ESCAPE": true, + "EVENT": true, + "EVERY": true, + "EXCEPT": true, + "EXCEPTION": true, + "EXCLUDE": true, + "EXCLUDING": true, + "EXCLUSIVE": true, + "EXEC": true, + "EXECUTE": true, + "EXISTS": true, + "EXP": true, + "EXPLAIN": true, + "EXPRESSION": true, + "EXTENSION": true, + "EXTERNAL": true, + "EXTRACT": true, + "FALSE": true, + "FAMILY": true, + "FETCH": true, + "FILE": true, + "FILTER": true, + "FINAL": true, + "FIRST": true, + "FIRST_VALUE": true, + "FLAG": true, + "FLOAT": true, + "FLOOR": true, + "FOLLOWING": true, + "FOR": true, + "FORCE": true, + "FOREIGN": true, + "FORTRAN": true, + "FORWARD": true, + "FOUND": true, + "FRAME_ROW": true, + "FREE": true, + "FREEZE": true, + "FROM": true, + "FS": true, + "FULL": true, + "FUNCTION": true, + "FUNCTIONS": true, + "FUSION": true, + "G": true, + "GENERAL": true, + "GENERATED": true, + "GET": true, + "GLOBAL": true, + "GO": true, + "GOTO": true, + "GRANT": true, + "GRANTED": true, + "GREATEST": true, + "GROUP": true, + "GROUPING": true, + "GROUPS": true, + "HANDLER": true, + "HAVING": true, + "HEADER": true, + "HEX": true, + "HIERARCHY": true, + "HOLD": true, + "HOUR": true, + "ID": true, + "IDENTITY": true, + "IF": true, + "IGNORE": true, + "ILIKE": true, + "IMMEDIATE": true, + "IMMEDIATELY": true, + "IMMUTABLE": true, + "IMPLEMENTATION": true, + "IMPLICIT": true, + "IMPORT": true, + "IN": true, + "INCLUDING": true, + "INCREMENT": true, + "INDENT": true, + "INDEX": true, + "INDEXES": true, + "INDICATOR": true, + "INHERIT": true, + "INHERITS": true, + "INITIALLY": true, + "INLINE": true, + "INNER": true, + "INOUT": true, + "INPUT": true, + "INSENSITIVE": true, + "INSERT": true, + "INSTANCE": true, + "INSTANTIABLE": true, + "INSTEAD": true, + "INT": true, + "INTEGER": true, + "INTEGRITY": true, + "INTERSECT": true, + "INTERSECTION": true, + "INTERVAL": true, + "INTO": true, + "INVOKER": true, + "IS": true, + "ISNULL": true, + "ISOLATION": true, + "JOIN": true, + "K": true, + "KEY": true, + "KEY_MEMBER": true, + "KEY_TYPE": true, + "LABEL": true, + "LAG": true, + "LANGUAGE": true, + "LARGE": true, + "LAST": true, + "LAST_VALUE": true, + "LATERAL": true, + "LC_COLLATE": true, + "LC_CTYPE": true, + "LEAD": true, + "LEADING": true, + "LEAKPROOF": true, + "LEAST": true, + "LEFT": true, + 
"LENGTH": true, + "LEVEL": true, + "LIBRARY": true, + "LIKE": true, + "LIKE_REGEX": true, + "LIMIT": true, + "LINK": true, + "LISTEN": true, + "LN": true, + "LOAD": true, + "LOCAL": true, + "LOCALTIME": true, + "LOCALTIMESTAMP": true, + "LOCATION": true, + "LOCATOR": true, + "LOCK": true, + "LOWER": true, + "M": true, + "MAP": true, + "MAPPING": true, + "MATCH": true, + "MATCHED": true, + "MATERIALIZED": true, + "MAX": true, + "MAXVALUE": true, + "MAX_CARDINALITY": true, + "MEMBER": true, + "MERGE": true, + "MESSAGE_LENGTH": true, + "MESSAGE_OCTET_LENGTH": true, + "MESSAGE_TEXT": true, + "METHOD": true, + "MIN": true, + "MINUTE": true, + "MINVALUE": true, + "MOD": true, + "MODE": true, + "MODIFIES": true, + "MODULE": true, + "MONTH": true, + "MORE": true, + "MOVE": true, + "MULTISET": true, + "MUMPS": true, + "NAME": true, + "NAMES": true, + "NAMESPACE": true, + "NATIONAL": true, + "NATURAL": true, + "NCHAR": true, + "NCLOB": true, + "NESTING": true, + "NEW": true, + "NEXT": true, + "NFC": true, + "NFD": true, + "NFKC": true, + "NFKD": true, + "NIL": true, + "NO": true, + "NONE": true, + "NORMALIZE": true, + "NORMALIZED": true, + "NOT": true, + "NOTHING": true, + "NOTIFY": true, + "NOTNULL": true, + "NOWAIT": true, + "NTH_VALUE": true, + "NTILE": true, + "NULL": true, + "NULLABLE": true, + "NULLIF": true, + "NULLS": true, + "NUMBER": true, + "NUMERIC": true, + "OBJECT": true, + "OCCURRENCES_REGEX": true, + "OCTETS": true, + "OCTET_LENGTH": true, + "OF": true, + "OFF": true, + "OFFSET": true, + "OIDS": true, + "OLD": true, + "ON": true, + "ONLY": true, + "OPEN": true, + "OPERATOR": true, + "OPTION": true, + "OPTIONS": true, + "OR": true, + "ORDER": true, + "ORDERING": true, + "ORDINALITY": true, + "OTHERS": true, + "OUT": true, + "OUTER": true, + "OUTPUT": true, + "OVER": true, + "OVERLAPS": true, + "OVERLAY": true, + "OVERRIDING": true, + "OWNED": true, + "OWNER": true, + "P": true, + "PAD": true, + "PARAMETER": true, + "PARAMETER_MODE": true, + "PARAMETER_NAME": true, + "PARAMETER_ORDINAL_POSITION": true, + "PARAMETER_SPECIFIC_CATALOG": true, + "PARAMETER_SPECIFIC_NAME": true, + "PARAMETER_SPECIFIC_SCHEMA": true, + "PARSER": true, + "PARTIAL": true, + "PARTITION": true, + "PASCAL": true, + "PASSING": true, + "PASSTHROUGH": true, + "PASSWORD": true, + "PATH": true, + "PERCENT": true, + "PERCENTILE_CONT": true, + "PERCENTILE_DISC": true, + "PERCENT_RANK": true, + "PERIOD": true, + "PERMISSION": true, + "PLACING": true, + "PLANS": true, + "PLI": true, + "PORTION": true, + "POSITION": true, + "POSITION_REGEX": true, + "POWER": true, + "PRECEDES": true, + "PRECEDING": true, + "PRECISION": true, + "PREPARE": true, + "PREPARED": true, + "PRESERVE": true, + "PRIMARY": true, + "PRIOR": true, + "PRIVILEGES": true, + "PROCEDURAL": true, + "PROCEDURE": true, + "PROGRAM": true, + "PUBLIC": true, + "QUOTE": true, + "RANGE": true, + "RANK": true, + "READ": true, + "READS": true, + "REAL": true, + "REASSIGN": true, + "RECHECK": true, + "RECOVERY": true, + "RECURSIVE": true, + "REF": true, + "REFERENCES": true, + "REFERENCING": true, + "REFRESH": true, + "REGR_AVGX": true, + "REGR_AVGY": true, + "REGR_COUNT": true, + "REGR_INTERCEPT": true, + "REGR_R2": true, + "REGR_SLOPE": true, + "REGR_SXX": true, + "REGR_SXY": true, + "REGR_SYY": true, + "REINDEX": true, + "RELATIVE": true, + "RELEASE": true, + "RENAME": true, + "REPEATABLE": true, + "REPLACE": true, + "REPLICA": true, + "REQUIRING": true, + "RESET": true, + "RESPECT": true, + "RESTART": true, + "RESTORE": true, + "RESTRICT": true, + "RESULT": true, + 
"RETURN": true, + "RETURNED_CARDINALITY": true, + "RETURNED_LENGTH": true, + "RETURNED_OCTET_LENGTH": true, + "RETURNED_SQLSTATE": true, + "RETURNING": true, + "RETURNS": true, + "REVOKE": true, + "RIGHT": true, + "ROLE": true, + "ROLLBACK": true, + "ROLLUP": true, + "ROUTINE": true, + "ROUTINE_CATALOG": true, + "ROUTINE_NAME": true, + "ROUTINE_SCHEMA": true, + "ROW": true, + "ROWS": true, + "ROW_COUNT": true, + "ROW_NUMBER": true, + "RULE": true, + "SAVEPOINT": true, + "SCALE": true, + "SCHEMA": true, + "SCHEMA_NAME": true, + "SCOPE": true, + "SCOPE_CATALOG": true, + "SCOPE_NAME": true, + "SCOPE_SCHEMA": true, + "SCROLL": true, + "SEARCH": true, + "SECOND": true, + "SECTION": true, + "SECURITY": true, + "SELECT": true, + "SELECTIVE": true, + "SELF": true, + "SENSITIVE": true, + "SEQUENCE": true, + "SEQUENCES": true, + "SERIALIZABLE": true, + "SERVER": true, + "SERVER_NAME": true, + "SESSION": true, + "SESSION_USER": true, + "SET": true, + "SETOF": true, + "SETS": true, + "SHARE": true, + "SHOW": true, + "SIMILAR": true, + "SIMPLE": true, + "SIZE": true, + "SMALLINT": true, + "SNAPSHOT": true, + "SOME": true, + "SOURCE": true, + "SPACE": true, + "SPECIFIC": true, + "SPECIFICTYPE": true, + "SPECIFIC_NAME": true, + "SQL": true, + "SQLCODE": true, + "SQLERROR": true, + "SQLEXCEPTION": true, + "SQLSTATE": true, + "SQLWARNING": true, + "SQRT": true, + "STABLE": true, + "STANDALONE": true, + "START": true, + "STATE": true, + "STATEMENT": true, + "STATIC": true, + "STATISTICS": true, + "STDDEV_POP": true, + "STDDEV_SAMP": true, + "STDIN": true, + "STDOUT": true, + "STORAGE": true, + "STRICT": true, + "STRIP": true, + "STRUCTURE": true, + "STYLE": true, + "SUBCLASS_ORIGIN": true, + "SUBMULTISET": true, + "SUBSTRING": true, + "SUBSTRING_REGEX": true, + "SUCCEEDS": true, + "SUM": true, + "SYMMETRIC": true, + "SYSID": true, + "SYSTEM": true, + "SYSTEM_TIME": true, + "SYSTEM_USER": true, + "T": true, + "TABLE": true, + "TABLES": true, + "TABLESAMPLE": true, + "TABLESPACE": true, + "TABLE_NAME": true, + "TEMP": true, + "TEMPLATE": true, + "TEMPORARY": true, + "TEXT": true, + "THEN": true, + "TIES": true, + "TIME": true, + "TIMESTAMP": true, + "TIMEZONE_HOUR": true, + "TIMEZONE_MINUTE": true, + "TO": true, + "TOKEN": true, + "TOP_LEVEL_COUNT": true, + "TRAILING": true, + "TRANSACTION": true, + "TRANSACTIONS_COMMITTED": true, + "TRANSACTIONS_ROLLED_BACK": true, + "TRANSACTION_ACTIVE": true, + "TRANSFORM": true, + "TRANSFORMS": true, + "TRANSLATE": true, + "TRANSLATE_REGEX": true, + "TRANSLATION": true, + "TREAT": true, + "TRIGGER": true, + "TRIGGER_CATALOG": true, + "TRIGGER_NAME": true, + "TRIGGER_SCHEMA": true, + "TRIM": true, + "TRIM_ARRAY": true, + "TRUE": true, + "TRUNCATE": true, + "TRUSTED": true, + "TYPE": true, + "TYPES": true, + "UESCAPE": true, + "UNBOUNDED": true, + "UNCOMMITTED": true, + "UNDER": true, + "UNENCRYPTED": true, + "UNION": true, + "UNIQUE": true, + "UNKNOWN": true, + "UNLINK": true, + "UNLISTEN": true, + "UNLOGGED": true, + "UNNAMED": true, + "UNNEST": true, + "UNTIL": true, + "UNTYPED": true, + "UPDATE": true, + "UPPER": true, + "URI": true, + "USAGE": true, + "USER": true, + "USER_DEFINED_TYPE_CATALOG": true, + "USER_DEFINED_TYPE_CODE": true, + "USER_DEFINED_TYPE_NAME": true, + "USER_DEFINED_TYPE_SCHEMA": true, + "USING": true, + "VACUUM": true, + "VALID": true, + "VALIDATE": true, + "VALIDATOR": true, + "VALUE": true, + "VALUES": true, + "VALUE_OF": true, + "VARBINARY": true, + "VARCHAR": true, + "VARIADIC": true, + "VARYING": true, + "VAR_POP": true, + "VAR_SAMP": true, + 
"VERBOSE": true, + "VERSION": true, + "VERSIONING": true, + "VIEW": true, + "VOLATILE": true, + "WHEN": true, + "WHENEVER": true, + "WHERE": true, + "WHITESPACE": true, + "WIDTH_BUCKET": true, + "WINDOW": true, + "WITH": true, + "WITHIN": true, + "WITHOUT": true, + "WORK": true, + "WRAPPER": true, + "WRITE": true, + "XML": true, + "XMLAGG": true, + "XMLATTRIBUTES": true, + "XMLBINARY": true, + "XMLCAST": true, + "XMLCOMMENT": true, + "XMLCONCAT": true, + "XMLDECLARATION": true, + "XMLDOCUMENT": true, + "XMLELEMENT": true, + "XMLEXISTS": true, + "XMLFOREST": true, + "XMLITERATE": true, + "XMLNAMESPACES": true, + "XMLPARSE": true, + "XMLPI": true, + "XMLQUERY": true, + "XMLROOT": true, + "XMLSCHEMA": true, + "XMLSERIALIZE": true, + "XMLTABLE": true, + "XMLTEXT": true, + "XMLVALIDATE": true, + "YEAR": true, + "YES": true, + "ZONE": true, + } + + postgresQuoter = schemas.Quoter{ + Prefix: '"', + Suffix: '"', + IsReserved: schemas.AlwaysReserve, + } +) + +var ( + // DefaultPostgresSchema default postgres schema + DefaultPostgresSchema = "public" +) + +type postgres struct { + Base +} + +func (db *postgres) Init(uri *URI) error { + db.quoter = postgresQuoter + return db.Base.Init(db, uri) +} + +func (db *postgres) getSchema() string { + if db.uri.Schema != "" { + return db.uri.Schema + } + return DefaultPostgresSchema +} + +func (db *postgres) needQuote(name string) bool { + if db.IsReserved(name) { + return true + } + for _, c := range name { + if c >= 'A' && c <= 'Z' { + return true + } + } + return false +} + +func (db *postgres) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = postgresQuoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = postgresQuoter + q.IsReserved = db.needQuote + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = postgresQuoter + } +} + +func (db *postgres) SQLType(c *schemas.Column) string { + var res string + switch t := c.SQLType.Name; t { + case schemas.TinyInt: + res = schemas.SmallInt + return res + case schemas.Bit: + res = schemas.Boolean + return res + case schemas.MediumInt, schemas.Int, schemas.Integer: + if c.IsAutoIncrement { + return schemas.Serial + } + return schemas.Integer + case schemas.BigInt: + if c.IsAutoIncrement { + return schemas.BigSerial + } + return schemas.BigInt + case schemas.Serial, schemas.BigSerial: + c.IsAutoIncrement = true + c.Nullable = false + res = t + case schemas.Binary, schemas.VarBinary: + return schemas.Bytea + case schemas.DateTime: + res = schemas.TimeStamp + case schemas.TimeStampz: + return "timestamp with time zone" + case schemas.Float: + res = schemas.Real + case schemas.TinyText, schemas.MediumText, schemas.LongText: + res = schemas.Text + case schemas.NVarchar: + res = schemas.Varchar + case schemas.Uuid: + return schemas.Uuid + case schemas.Blob, schemas.TinyBlob, schemas.MediumBlob, schemas.LongBlob: + return schemas.Bytea + case schemas.Double: + return "DOUBLE PRECISION" + default: + if c.IsAutoIncrement { + return schemas.Serial + } + res = t + } + + if strings.EqualFold(res, "bool") { + // for bool, we don't need length information + return res + } + hasLen1 := (c.Length > 0) + hasLen2 := (c.Length2 > 0) + + if hasLen2 { + res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" + } else if hasLen1 { + res += "(" + strconv.Itoa(c.Length) + ")" + } + return res +} + +func (db *postgres) IsReserved(name string) bool { + _, ok := postgresReservedWords[strings.ToUpper(name)] 
+ return ok +} + +func (db *postgres) AutoIncrStr() string { + return "" +} + +func (db *postgres) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + sql += " (" + + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + return []string{sql}, true +} + +func (db *postgres) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + if len(db.getSchema()) == 0 { + args := []interface{}{tableName, idxName} + return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args + } + + args := []interface{}{db.getSchema(), tableName, idxName} + return `SELECT indexname FROM pg_indexes ` + + `WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args +} + +func (db *postgres) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + if len(db.getSchema()) == 0 { + return db.HasRecords(queryer, ctx, `SELECT tablename FROM pg_tables WHERE tablename = $1`, tableName) + } + + return db.HasRecords(queryer, ctx, `SELECT tablename FROM pg_tables WHERE schemaname = $1 AND tablename = $2`, + db.getSchema(), tableName) +} + +func (db *postgres) ModifyColumnSQL(tableName string, col *schemas.Column) string { + if len(db.getSchema()) == 0 || strings.Contains(tableName, ".") { + return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s", + tableName, col.Name, db.SQLType(col)) + } + return fmt.Sprintf("alter table %s.%s ALTER COLUMN %s TYPE %s", + db.getSchema(), tableName, col.Name, db.SQLType(col)) +} + +func (db *postgres) DropIndexSQL(tableName string, index *schemas.Index) string { + idxName := index.Name + + tableParts := strings.Split(strings.Replace(tableName, `"`, "", -1), ".") + tableName = tableParts[len(tableParts)-1] + + if !strings.HasPrefix(idxName, "UQE_") && + !strings.HasPrefix(idxName, "IDX_") { + if index.Type == schemas.UniqueType { + idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) + } else { + idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) + } + } + if db.getSchema() != "" { + idxName = db.getSchema() + "." + idxName + } + return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) +} + +func (db *postgres) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + args := []interface{}{db.getSchema(), tableName, colName} + query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" + + " AND column_name = $3" + if len(db.getSchema()) == 0 { + args = []interface{}{tableName, colName} + query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" + + " AND column_name = $2" + } + + rows, err := queryer.QueryContext(ctx, query, args...) 
+ if err != nil { + return false, err + } + defer rows.Close() + + return rows.Next(), nil +} + +func (db *postgres) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{tableName} + s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, + CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey, + CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey +FROM pg_attribute f + JOIN pg_class c ON c.oid = f.attrelid JOIN pg_type t ON t.oid = f.atttypid + LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum + LEFT JOIN pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey) + LEFT JOIN pg_class AS g ON p.confrelid = g.oid + LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name +WHERE n.nspname= s.table_schema AND c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;` + + schema := db.getSchema() + if schema != "" { + s = fmt.Sprintf(s, "AND s.table_schema = $2") + args = append(args, schema) + } else { + s = fmt.Sprintf(s, "") + } + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, nil, err + } + defer rows.Close() + + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + + for rows.Next() { + col := new(schemas.Column) + col.Indexes = make(map[string]int) + + var colName, isNullable, dataType string + var maxLenStr, colDefault *string + var isPK, isUnique bool + err = rows.Scan(&colName, &colDefault, &isNullable, &dataType, &maxLenStr, &isPK, &isUnique) + if err != nil { + return nil, nil, err + } + + var maxLen int + if maxLenStr != nil { + maxLen, err = strconv.Atoi(*maxLenStr) + if err != nil { + return nil, nil, err + } + } + + col.Name = strings.Trim(colName, `" `) + + if colDefault != nil { + var theDefault = *colDefault + // cockroach has type with the default value with ::: + // and postgres with ::, we should remove them before store them + idx := strings.Index(theDefault, ":::") + if idx == -1 { + idx = strings.Index(theDefault, "::") + } + if idx > -1 { + theDefault = theDefault[:idx] + } + + if strings.HasSuffix(theDefault, "+00:00'") { + theDefault = theDefault[:len(theDefault)-7] + "'" + } + + col.Default = theDefault + col.DefaultIsEmpty = false + if strings.HasPrefix(col.Default, "nextval(") { + col.IsAutoIncrement = true + col.Default = "" + col.DefaultIsEmpty = true + } + } else { + col.DefaultIsEmpty = true + } + + if isPK { + col.IsPrimaryKey = true + } + + col.Nullable = (isNullable == "YES") + + switch strings.ToLower(dataType) { + case "character varying", "character", "string": + col.SQLType = schemas.SQLType{Name: schemas.Varchar, DefaultLength: 0, DefaultLength2: 0} + case "timestamp without time zone": + col.SQLType = schemas.SQLType{Name: schemas.DateTime, DefaultLength: 0, DefaultLength2: 0} + case "timestamp with time zone": + col.SQLType = schemas.SQLType{Name: schemas.TimeStampz, DefaultLength: 0, DefaultLength2: 0} + case "double precision": + col.SQLType = schemas.SQLType{Name: schemas.Double, DefaultLength: 0, DefaultLength2: 0} + case "boolean": + col.SQLType = schemas.SQLType{Name: schemas.Bool, DefaultLength: 0, DefaultLength2: 0} + case "time without time zone": + col.SQLType = schemas.SQLType{Name: schemas.Time, DefaultLength: 0, DefaultLength2: 0} + case "bytes": + col.SQLType = schemas.SQLType{Name: schemas.Binary, 
DefaultLength: 0, DefaultLength2: 0} + case "oid": + col.SQLType = schemas.SQLType{Name: schemas.BigInt, DefaultLength: 0, DefaultLength2: 0} + case "array": + col.SQLType = schemas.SQLType{Name: schemas.Array, DefaultLength: 0, DefaultLength2: 0} + default: + startIdx := strings.Index(strings.ToLower(dataType), "string(") + if startIdx != -1 && strings.HasSuffix(dataType, ")") { + length := dataType[startIdx+8 : len(dataType)-1] + l, _ := strconv.Atoi(length) + col.SQLType = schemas.SQLType{Name: "STRING", DefaultLength: l, DefaultLength2: 0} + } else { + col.SQLType = schemas.SQLType{Name: strings.ToUpper(dataType), DefaultLength: 0, DefaultLength2: 0} + } + } + if _, ok := schemas.SqlTypes[col.SQLType.Name]; !ok { + return nil, nil, fmt.Errorf("Unknown colType: %s - %s", dataType, col.SQLType.Name) + } + + col.Length = maxLen + + if !col.DefaultIsEmpty { + if col.SQLType.IsText() { + if strings.HasSuffix(col.Default, "::character varying") { + col.Default = strings.TrimRight(col.Default, "::character varying") + } else if !strings.HasPrefix(col.Default, "'") { + col.Default = "'" + col.Default + "'" + } + } else if col.SQLType.IsTime() { + if strings.HasSuffix(col.Default, "::timestamp without time zone") { + col.Default = strings.TrimRight(col.Default, "::timestamp without time zone") + } + } + } + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + + return colSeq, cols, nil +} + +func (db *postgres) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := "SELECT tablename FROM pg_tables" + schema := db.getSchema() + if schema != "" { + args = append(args, schema) + s = s + " WHERE schemaname = $1" + } + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + var name string + err = rows.Scan(&name) + if err != nil { + return nil, err + } + table.Name = name + tables = append(tables, table) + } + return tables, nil +} + +func getIndexColName(indexdef string) []string { + var colNames []string + + cs := strings.Split(indexdef, "(") + for _, v := range strings.Split(strings.Split(cs[1], ")")[0], ",") { + colNames = append(colNames, strings.Split(strings.TrimLeft(v, " "), " ")[0]) + } + + return colNames +} + +func (db *postgres) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{tableName} + s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1") + if len(db.getSchema()) != 0 { + args = append(args, db.getSchema()) + s = s + " AND schemaname=$2" + } + + rows, err := queryer.QueryContext(ctx, s, args...) 
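getIndexColName above does string surgery on a pg_indexes indexdef to recover the indexed column names, dropping any sort qualifiers. Its behaviour on a representative (hypothetical) definition:

    cols := getIndexColName("CREATE INDEX idx_user_name ON user USING btree (name, age DESC)")
    // cols == []string{"name", "age"} -- the DESC qualifier is discarded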
+ if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var indexType int + var indexName, indexdef string + var colNames []string + err = rows.Scan(&indexName, &indexdef) + if err != nil { + return nil, err + } + + if indexName == "primary" { + continue + } + indexName = strings.Trim(indexName, `" `) + if strings.HasSuffix(indexName, "_pkey") { + continue + } + if strings.HasPrefix(indexdef, "CREATE UNIQUE INDEX") { + indexType = schemas.UniqueType + } else { + indexType = schemas.IndexType + } + colNames = getIndexColName(indexdef) + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + newIdxName := indexName[5+len(tableName):] + isRegular = true + if newIdxName != "" { + indexName = newIdxName + } + } + + index := &schemas.Index{Name: indexName, Type: indexType, Cols: make([]string, 0)} + for _, colName := range colNames { + index.Cols = append(index.Cols, strings.TrimSpace(strings.Replace(colName, `"`, "", -1))) + } + index.IsRegular = isRegular + indexes[index.Name] = index + } + return indexes, nil +} + +func (db *postgres) Filters() []Filter { + return []Filter{&SeqFilter{Prefix: "$", Start: 1}} +} + +type pqDriver struct { +} + +type values map[string]string + +func (vs values) Set(k, v string) { + vs[k] = v +} + +func (vs values) Get(k string) (v string) { + return vs[k] +} + +func parseURL(connstr string) (string, error) { + u, err := url.Parse(connstr) + if err != nil { + return "", err + } + + if u.Scheme != "postgresql" && u.Scheme != "postgres" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) + + if u.Path != "" { + return escaper.Replace(u.Path[1:]), nil + } + + return "", nil +} + +func parseOpts(name string, o values) error { + if len(name) == 0 { + return fmt.Errorf("invalid options: %s", name) + } + + name = strings.TrimSpace(name) + + ps := strings.Split(name, " ") + for _, p := range ps { + kv := strings.Split(p, "=") + if len(kv) < 2 { + return fmt.Errorf("invalid option: %q", p) + } + o.Set(kv[0], kv[1]) + } + + return nil +} + +func (p *pqDriver) Parse(driverName, dataSourceName string) (*URI, error) { + db := &URI{DBType: schemas.POSTGRES} + var err error + + if strings.HasPrefix(dataSourceName, "postgresql://") || strings.HasPrefix(dataSourceName, "postgres://") { + db.DBName, err = parseURL(dataSourceName) + if err != nil { + return nil, err + } + } else { + o := make(values) + err = parseOpts(dataSourceName, o) + if err != nil { + return nil, err + } + + db.DBName = o.Get("dbname") + } + + if db.DBName == "" { + return nil, errors.New("dbname is empty") + } + + return db, nil +} + +type pqDriverPgx struct { + pqDriver +} + +func (pgx *pqDriverPgx) Parse(driverName, dataSourceName string) (*URI, error) { + // Remove the leading characters for driver to work + if len(dataSourceName) >= 9 && dataSourceName[0] == 0 { + dataSourceName = dataSourceName[9:] + } + return pgx.pqDriver.Parse(driverName, dataSourceName) +} + +// QueryDefaultPostgresSchema returns the default postgres schema +func QueryDefaultPostgresSchema(ctx context.Context, queryer core.Queryer) (string, error) { + rows, err := queryer.QueryContext(ctx, "SHOW SEARCH_PATH") + if err != nil { + return "", err + } + defer rows.Close() + if rows.Next() { + var defaultSchema string + if err = rows.Scan(&defaultSchema); err != nil { + return "", err + } + parts := 
strings.Split(defaultSchema, ",") + return strings.TrimSpace(parts[len(parts)-1]), nil + } + + return "", errors.New("No default schema") +} diff --git a/vendor/xorm.io/xorm/dialects/quote.go b/vendor/xorm.io/xorm/dialects/quote.go new file mode 100644 index 0000000000..da4e0dd609 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/quote.go @@ -0,0 +1,15 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +// QuotePolicy describes quote handle policy +type QuotePolicy int + +// All QuotePolicies +const ( + QuotePolicyAlways QuotePolicy = iota + QuotePolicyNone + QuotePolicyReserved +) diff --git a/vendor/xorm.io/xorm/dialects/sqlite3.go b/vendor/xorm.io/xorm/dialects/sqlite3.go new file mode 100644 index 0000000000..73f98beb9d --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/sqlite3.go @@ -0,0 +1,529 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "context" + "database/sql" + "errors" + "fmt" + "regexp" + "strings" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +var ( + sqlite3ReservedWords = map[string]bool{ + "ABORT": true, + "ACTION": true, + "ADD": true, + "AFTER": true, + "ALL": true, + "ALTER": true, + "ANALYZE": true, + "AND": true, + "AS": true, + "ASC": true, + "ATTACH": true, + "AUTOINCREMENT": true, + "BEFORE": true, + "BEGIN": true, + "BETWEEN": true, + "BY": true, + "CASCADE": true, + "CASE": true, + "CAST": true, + "CHECK": true, + "COLLATE": true, + "COLUMN": true, + "COMMIT": true, + "CONFLICT": true, + "CONSTRAINT": true, + "CREATE": true, + "CROSS": true, + "CURRENT_DATE": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "DATABASE": true, + "DEFAULT": true, + "DEFERRABLE": true, + "DEFERRED": true, + "DELETE": true, + "DESC": true, + "DETACH": true, + "DISTINCT": true, + "DROP": true, + "EACH": true, + "ELSE": true, + "END": true, + "ESCAPE": true, + "EXCEPT": true, + "EXCLUSIVE": true, + "EXISTS": true, + "EXPLAIN": true, + "FAIL": true, + "FOR": true, + "FOREIGN": true, + "FROM": true, + "FULL": true, + "GLOB": true, + "GROUP": true, + "HAVING": true, + "IF": true, + "IGNORE": true, + "IMMEDIATE": true, + "IN": true, + "INDEX": true, + "INDEXED": true, + "INITIALLY": true, + "INNER": true, + "INSERT": true, + "INSTEAD": true, + "INTERSECT": true, + "INTO": true, + "IS": true, + "ISNULL": true, + "JOIN": true, + "KEY": true, + "LEFT": true, + "LIKE": true, + "LIMIT": true, + "MATCH": true, + "NATURAL": true, + "NO": true, + "NOT": true, + "NOTNULL": true, + "NULL": true, + "OF": true, + "OFFSET": true, + "ON": true, + "OR": true, + "ORDER": true, + "OUTER": true, + "PLAN": true, + "PRAGMA": true, + "PRIMARY": true, + "QUERY": true, + "RAISE": true, + "RECURSIVE": true, + "REFERENCES": true, + "REGEXP": true, + "REINDEX": true, + "RELEASE": true, + "RENAME": true, + "REPLACE": true, + "RESTRICT": true, + "RIGHT": true, + "ROLLBACK": true, + "ROW": true, + "SAVEPOINT": true, + "SELECT": true, + "SET": true, + "TABLE": true, + "TEMP": true, + "TEMPORARY": true, + "THEN": true, + "TO": true, + "TRANSACTI": true, + "TRIGGER": true, + "UNION": true, + "UNIQUE": true, + "UPDATE": true, + "USING": true, + "VACUUM": true, + "VALUES": true, + "VIEW": true, + "VIRTUAL": true, + "WHEN": true, + "WHERE": true, + "WITH": true, + "WITHOUT": true, + } + + sqlite3Quoter = schemas.Quoter{ 
+ Prefix: '`', + Suffix: '`', + IsReserved: schemas.AlwaysReserve, + } +) + +type sqlite3 struct { + Base +} + +func (db *sqlite3) Init(uri *URI) error { + db.quoter = sqlite3Quoter + return db.Base.Init(db, uri) +} + +func (db *sqlite3) SetQuotePolicy(quotePolicy QuotePolicy) { + switch quotePolicy { + case QuotePolicyNone: + var q = sqlite3Quoter + q.IsReserved = schemas.AlwaysNoReserve + db.quoter = q + case QuotePolicyReserved: + var q = sqlite3Quoter + q.IsReserved = db.IsReserved + db.quoter = q + case QuotePolicyAlways: + fallthrough + default: + db.quoter = sqlite3Quoter + } +} + +func (db *sqlite3) SQLType(c *schemas.Column) string { + switch t := c.SQLType.Name; t { + case schemas.Bool: + if c.Default == "true" { + c.Default = "1" + } else if c.Default == "false" { + c.Default = "0" + } + return schemas.Integer + case schemas.Date, schemas.DateTime, schemas.TimeStamp, schemas.Time: + return schemas.DateTime + case schemas.TimeStampz: + return schemas.Text + case schemas.Char, schemas.Varchar, schemas.NVarchar, schemas.TinyText, + schemas.Text, schemas.MediumText, schemas.LongText, schemas.Json: + return schemas.Text + case schemas.Bit, schemas.TinyInt, schemas.SmallInt, schemas.MediumInt, schemas.Int, schemas.Integer, schemas.BigInt: + return schemas.Integer + case schemas.Float, schemas.Double, schemas.Real: + return schemas.Real + case schemas.Decimal, schemas.Numeric: + return schemas.Numeric + case schemas.TinyBlob, schemas.Blob, schemas.MediumBlob, schemas.LongBlob, schemas.Bytea, schemas.Binary, schemas.VarBinary: + return schemas.Blob + case schemas.Serial, schemas.BigSerial: + c.IsPrimaryKey = true + c.IsAutoIncrement = true + c.Nullable = false + return schemas.Integer + default: + return t + } +} + +func (db *sqlite3) FormatBytes(bs []byte) string { + return fmt.Sprintf("X'%x'", bs) +} + +func (db *sqlite3) IsReserved(name string) bool { + _, ok := sqlite3ReservedWords[strings.ToUpper(name)] + return ok +} + +func (db *sqlite3) AutoIncrStr() string { + return "AUTOINCREMENT" +} + +func (db *sqlite3) IndexCheckSQL(tableName, idxName string) (string, []interface{}) { + args := []interface{}{idxName} + return "SELECT name FROM sqlite_master WHERE type='index' and name = ?", args +} + +func (db *sqlite3) IsTableExist(queryer core.Queryer, ctx context.Context, tableName string) (bool, error) { + return db.HasRecords(queryer, ctx, "SELECT name FROM sqlite_master WHERE type='table' and name = ?", tableName) +} + +func (db *sqlite3) DropIndexSQL(tableName string, index *schemas.Index) string { + // var unique string + idxName := index.Name + + if !strings.HasPrefix(idxName, "UQE_") && + !strings.HasPrefix(idxName, "IDX_") { + if index.Type == schemas.UniqueType { + idxName = fmt.Sprintf("UQE_%v_%v", tableName, index.Name) + } else { + idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name) + } + } + return fmt.Sprintf("DROP INDEX %v", db.Quoter().Quote(idxName)) +} + +func (db *sqlite3) CreateTableSQL(table *schemas.Table, tableName string) ([]string, bool) { + var sql string + sql = "CREATE TABLE IF NOT EXISTS " + if tableName == "" { + tableName = table.Name + } + + quoter := db.Quoter() + sql += quoter.Quote(tableName) + sql += " (" + + if len(table.ColumnsSeq()) > 0 { + pkList := table.PrimaryKeys + + for _, colName := range table.ColumnsSeq() { + col := table.GetColumn(colName) + s, _ := ColumnString(db, col, col.IsPrimaryKey && len(pkList) == 1) + sql += s + sql = strings.TrimSpace(sql) + sql += ", " + } + + if len(pkList) > 1 { + sql += "PRIMARY KEY ( " + sql += 
quoter.Join(pkList, ",") + sql += " ), " + } + + sql = sql[:len(sql)-2] + } + sql += ")" + + return []string{sql}, true +} + +func (db *sqlite3) ForUpdateSQL(query string) string { + return query +} + +func (db *sqlite3) IsColumnExist(queryer core.Queryer, ctx context.Context, tableName, colName string) (bool, error) { + query := "SELECT * FROM " + tableName + " LIMIT 0" + rows, err := queryer.QueryContext(ctx, query) + if err != nil { + return false, err + } + defer rows.Close() + + cols, err := rows.Columns() + if err != nil { + return false, err + } + + for _, col := range cols { + if strings.EqualFold(col, colName) { + return true, nil + } + } + + return false, nil +} + +// splitColStr splits a sqlite col strings as fields +func splitColStr(colStr string) []string { + colStr = strings.TrimSpace(colStr) + var results = make([]string, 0, 10) + var lastIdx int + var hasC, hasQuote bool + for i, c := range colStr { + if c == ' ' && !hasQuote { + if hasC { + results = append(results, colStr[lastIdx:i]) + hasC = false + } + } else { + if c == '\'' { + hasQuote = !hasQuote + } + if !hasC { + lastIdx = i + } + hasC = true + if i == len(colStr)-1 { + results = append(results, colStr[lastIdx:i+1]) + } + } + } + return results +} + +func parseString(colStr string) (*schemas.Column, error) { + fields := splitColStr(colStr) + col := new(schemas.Column) + col.Indexes = make(map[string]int) + col.Nullable = true + col.DefaultIsEmpty = true + + for idx, field := range fields { + if idx == 0 { + col.Name = strings.Trim(strings.Trim(field, "`[] "), `"`) + continue + } else if idx == 1 { + col.SQLType = schemas.SQLType{Name: field, DefaultLength: 0, DefaultLength2: 0} + continue + } + switch field { + case "PRIMARY": + col.IsPrimaryKey = true + case "AUTOINCREMENT": + col.IsAutoIncrement = true + case "NULL": + if fields[idx-1] == "NOT" { + col.Nullable = false + } else { + col.Nullable = true + } + case "DEFAULT": + col.Default = fields[idx+1] + col.DefaultIsEmpty = false + } + } + return col, nil +} + +func (db *sqlite3) GetColumns(queryer core.Queryer, ctx context.Context, tableName string) ([]string, map[string]*schemas.Column, error) { + args := []interface{}{tableName} + s := "SELECT sql FROM sqlite_master WHERE type='table' and name = ?" + + rows, err := queryer.QueryContext(ctx, s, args...) 
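Because sqlite keeps only the original CREATE TABLE text in sqlite_master, GetColumns re-parses it; splitColStr and parseString above handle one column definition at a time, with single quotes protecting embedded spaces. Expected results for two hypothetical definitions:

    col, _ := parseString("`id` INTEGER PRIMARY KEY AUTOINCREMENT")
    // col.Name == "id", col.SQLType.Name == "INTEGER",
    // col.IsPrimaryKey and col.IsAutoIncrement are true

    col, _ = parseString("name TEXT NOT NULL DEFAULT 'guest'")
    // col.Nullable == false, col.Default == "'guest'"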
+ if err != nil { + return nil, nil, err + } + defer rows.Close() + + var name string + for rows.Next() { + err = rows.Scan(&name) + if err != nil { + return nil, nil, err + } + break + } + + if name == "" { + return nil, nil, errors.New("no table named " + tableName) + } + + nStart := strings.Index(name, "(") + nEnd := strings.LastIndex(name, ")") + reg := regexp.MustCompile(`[^\(,\)]*(\([^\(]*\))?`) + colCreates := reg.FindAllString(name[nStart+1:nEnd], -1) + cols := make(map[string]*schemas.Column) + colSeq := make([]string, 0) + + for _, colStr := range colCreates { + reg = regexp.MustCompile(`,\s`) + colStr = reg.ReplaceAllString(colStr, ",") + if strings.HasPrefix(strings.TrimSpace(colStr), "PRIMARY KEY") { + parts := strings.Split(strings.TrimSpace(colStr), "(") + if len(parts) == 2 { + pkCols := strings.Split(strings.TrimRight(strings.TrimSpace(parts[1]), ")"), ",") + for _, pk := range pkCols { + if col, ok := cols[strings.Trim(strings.TrimSpace(pk), "`")]; ok { + col.IsPrimaryKey = true + } + } + } + continue + } + + col, err := parseString(colStr) + if err != nil { + return colSeq, cols, err + } + + cols[col.Name] = col + colSeq = append(colSeq, col.Name) + } + return colSeq, cols, nil +} + +func (db *sqlite3) GetTables(queryer core.Queryer, ctx context.Context) ([]*schemas.Table, error) { + args := []interface{}{} + s := "SELECT name FROM sqlite_master WHERE type='table'" + + rows, err := queryer.QueryContext(ctx, s, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]*schemas.Table, 0) + for rows.Next() { + table := schemas.NewEmptyTable() + err = rows.Scan(&table.Name) + if err != nil { + return nil, err + } + if table.Name == "sqlite_sequence" { + continue + } + tables = append(tables, table) + } + return tables, nil +} + +func (db *sqlite3) GetIndexes(queryer core.Queryer, ctx context.Context, tableName string) (map[string]*schemas.Index, error) { + args := []interface{}{tableName} + s := "SELECT sql FROM sqlite_master WHERE type='index' and tbl_name = ?" + + rows, err := queryer.QueryContext(ctx, s, args...) 
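The loop that follows recovers xorm's index-naming convention from the stored CREATE INDEX statement: a UQE_<table>_ or IDX_<table>_ prefix marks an index created by xorm itself (IsRegular) and is stripped to obtain the logical name. For a hypothetical sqlite_master row such as CREATE UNIQUE INDEX `UQE_user_email` ON `user` (`email`), the parsed result would be:

    index := &schemas.Index{
        Name:      "email",            // "UQE_user_" prefix stripped
        Type:      schemas.UniqueType, // from the CREATE UNIQUE INDEX prefix
        Cols:      []string{"email"},
        IsRegular: true,               // the name followed xorm's convention
    }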
+ if err != nil { + return nil, err + } + defer rows.Close() + + indexes := make(map[string]*schemas.Index, 0) + for rows.Next() { + var tmpSQL sql.NullString + err = rows.Scan(&tmpSQL) + if err != nil { + return nil, err + } + + if !tmpSQL.Valid { + continue + } + sql := tmpSQL.String + + index := new(schemas.Index) + nNStart := strings.Index(sql, "INDEX") + nNEnd := strings.Index(sql, "ON") + if nNStart == -1 || nNEnd == -1 { + continue + } + + indexName := strings.Trim(sql[nNStart+6:nNEnd], "` []'\"") + var isRegular bool + if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { + index.Name = indexName[5+len(tableName):] + isRegular = true + } else { + index.Name = indexName + } + + if strings.HasPrefix(sql, "CREATE UNIQUE INDEX") { + index.Type = schemas.UniqueType + } else { + index.Type = schemas.IndexType + } + + nStart := strings.Index(sql, "(") + nEnd := strings.Index(sql, ")") + colIndexes := strings.Split(sql[nStart+1:nEnd], ",") + + index.Cols = make([]string, 0) + for _, col := range colIndexes { + index.Cols = append(index.Cols, strings.Trim(col, "` []")) + } + index.IsRegular = isRegular + indexes[index.Name] = index + } + + return indexes, nil +} + +func (db *sqlite3) Filters() []Filter { + return []Filter{} +} + +type sqlite3Driver struct { +} + +func (p *sqlite3Driver) Parse(driverName, dataSourceName string) (*URI, error) { + if strings.Contains(dataSourceName, "?") { + dataSourceName = dataSourceName[:strings.Index(dataSourceName, "?")] + } + + return &URI{DBType: schemas.SQLITE, DBName: dataSourceName}, nil +} diff --git a/vendor/xorm.io/xorm/dialects/table_name.go b/vendor/xorm.io/xorm/dialects/table_name.go new file mode 100644 index 0000000000..e190cd4bfa --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/table_name.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "fmt" + "reflect" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/names" +) + +// TableNameWithSchema will add schema prefix on table name if possible +func TableNameWithSchema(dialect Dialect, tableName string) string { + // Add schema name as prefix of table name. + // Only for postgres database. 
+ if dialect.URI().Schema != "" && + strings.Index(tableName, ".") == -1 { + return fmt.Sprintf("%s.%s", dialect.URI().Schema, tableName) + } + return tableName +} + +// TableNameNoSchema returns table name with given tableName +func TableNameNoSchema(dialect Dialect, mapper names.Mapper, tableName interface{}) string { + quote := dialect.Quoter().Quote + switch tableName.(type) { + case []string: + t := tableName.([]string) + if len(t) > 1 { + return fmt.Sprintf("%v AS %v", quote(t[0]), quote(t[1])) + } else if len(t) == 1 { + return quote(t[0]) + } + case []interface{}: + t := tableName.([]interface{}) + l := len(t) + var table string + if l > 0 { + f := t[0] + switch f.(type) { + case string: + table = f.(string) + case names.TableName: + table = f.(names.TableName).TableName() + default: + v := utils.ReflectValue(f) + t := v.Type() + if t.Kind() == reflect.Struct { + table = names.GetTableName(mapper, v) + } else { + table = quote(fmt.Sprintf("%v", f)) + } + } + } + if l > 1 { + return fmt.Sprintf("%v AS %v", quote(table), quote(fmt.Sprintf("%v", t[1]))) + } else if l == 1 { + return quote(table) + } + case names.TableName: + return tableName.(names.TableName).TableName() + case string: + return tableName.(string) + case reflect.Value: + v := tableName.(reflect.Value) + return names.GetTableName(mapper, v) + default: + v := utils.ReflectValue(tableName) + t := v.Type() + if t.Kind() == reflect.Struct { + return names.GetTableName(mapper, v) + } + return quote(fmt.Sprintf("%v", tableName)) + } + return "" +} + +// FullTableName returns table name with quote and schema according parameter +func FullTableName(dialect Dialect, mapper names.Mapper, bean interface{}, includeSchema ...bool) string { + tbName := TableNameNoSchema(dialect, mapper, bean) + if len(includeSchema) > 0 && includeSchema[0] && !utils.IsSubQuery(tbName) { + tbName = TableNameWithSchema(dialect, tbName) + } + return tbName +} diff --git a/vendor/xorm.io/xorm/dialects/time.go b/vendor/xorm.io/xorm/dialects/time.go new file mode 100644 index 0000000000..b0394745d7 --- /dev/null +++ b/vendor/xorm.io/xorm/dialects/time.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dialects + +import ( + "time" + + "xorm.io/xorm/schemas" +) + +// FormatTime format time as column type +func FormatTime(dialect Dialect, sqlTypeName string, t time.Time) (v interface{}) { + switch sqlTypeName { + case schemas.Time: + s := t.Format("2006-01-02 15:04:05") // time.RFC3339 + v = s[11:19] + case schemas.Date: + v = t.Format("2006-01-02") + case schemas.DateTime, schemas.TimeStamp, schemas.Varchar: // !DarthPestilane! format time when sqlTypeName is schemas.Varchar. 
+ v = t.Format("2006-01-02 15:04:05") + case schemas.TimeStampz: + if dialect.URI().DBType == schemas.MSSQL { + v = t.Format("2006-01-02T15:04:05.9999999Z07:00") + } else { + v = t.Format(time.RFC3339Nano) + } + case schemas.BigInt, schemas.Int: + v = t.Unix() + default: + v = t + } + return +} + +func FormatColumnTime(dialect Dialect, defaultTimeZone *time.Location, col *schemas.Column, t time.Time) (v interface{}) { + if t.IsZero() { + if col.Nullable { + return nil + } + return "" + } + + if col.TimeZone != nil { + return FormatTime(dialect, col.SQLType.Name, t.In(col.TimeZone)) + } + return FormatTime(dialect, col.SQLType.Name, t.In(defaultTimeZone)) +} diff --git a/vendor/xorm.io/xorm/doc.go b/vendor/xorm.io/xorm/doc.go new file mode 100644 index 0000000000..ea6a222671 --- /dev/null +++ b/vendor/xorm.io/xorm/doc.go @@ -0,0 +1,184 @@ +// Copyright 2013 - 2016 The XORM Authors. All rights reserved. +// Use of this source code is governed by a BSD +// license that can be found in the LICENSE file. + +/* + +Package xorm is a simple and powerful ORM for Go. + +Installation + +Make sure you have installed Go 1.11+ and then: + + go get xorm.io/xorm + +Create Engine + +Firstly, we should new an engine for a database + + engine, err := xorm.NewEngine(driverName, dataSourceName) + +Method NewEngine's parameters is the same as sql.Open. It depends +drivers' implementation. +Generally, one engine for an application is enough. You can set it as package variable. + +Raw Methods + +XORM also support raw SQL execution: + +1. query a SQL string, the returned results is []map[string][]byte + + results, err := engine.Query("select * from user") + +2. execute a SQL string, the returned results + + affected, err := engine.Exec("update user set .... where ...") + +ORM Methods + +There are 8 major ORM methods and many helpful methods to use to operate database. + +1. Insert one or multiple records to database + + affected, err := engine.Insert(&struct) + // INSERT INTO struct () values () + affected, err := engine.Insert(&struct1, &struct2) + // INSERT INTO struct1 () values () + // INSERT INTO struct2 () values () + affected, err := engine.Insert(&sliceOfStruct) + // INSERT INTO struct () values (),(),() + affected, err := engine.Insert(&struct1, &sliceOfStruct2) + // INSERT INTO struct1 () values () + // INSERT INTO struct2 () values (),(),() + +2. Query one record or one variable from database + + has, err := engine.Get(&user) + // SELECT * FROM user LIMIT 1 + + var id int64 + has, err := engine.Table("user").Where("name = ?", name).Get(&id) + // SELECT id FROM user WHERE name = ? LIMIT 1 + +3. Query multiple records from database + + var sliceOfStructs []Struct + err := engine.Find(&sliceOfStructs) + // SELECT * FROM user + + var mapOfStructs = make(map[int64]Struct) + err := engine.Find(&mapOfStructs) + // SELECT * FROM user + + var int64s []int64 + err := engine.Table("user").Cols("id").Find(&int64s) + // SELECT id FROM user + +4. Query multiple records and record by record handle, there two methods, one is Iterate, +another is Rows + + err := engine.Iterate(...) + // SELECT * FROM user + + rows, err := engine.Rows(...) + // SELECT * FROM user + defer rows.Close() + bean := new(Struct) + for rows.Next() { + err = rows.Scan(bean) + } + +5. Update one or more records + + affected, err := engine.ID(...).Update(&user) + // UPDATE user SET ... + +6. Delete one or more records, Delete MUST has condition + + affected, err := engine.Where(...).Delete(&user) + // DELETE FROM user Where ... + +7. 
Count records + + counts, err := engine.Count(&user) + // SELECT count(*) AS total FROM user + + counts, err := engine.SQL("select count(*) FROM user").Count() + // select count(*) FROM user + +8. Sum records + + sumFloat64, err := engine.Sum(&user, "id") + // SELECT sum(id) from user + + sumFloat64s, err := engine.Sums(&user, "id1", "id2") + // SELECT sum(id1), sum(id2) from user + + sumInt64s, err := engine.SumsInt(&user, "id1", "id2") + // SELECT sum(id1), sum(id2) from user + +Conditions + +The above 8 methods could use with condition methods chainable. +Attention: the above 8 methods should be the last chainable method. + +1. ID, In + + engine.ID(1).Get(&user) // for single primary key + // SELECT * FROM user WHERE id = 1 + engine.ID(schemas.PK{1, 2}).Get(&user) // for composite primary keys + // SELECT * FROM user WHERE id1 = 1 AND id2 = 2 + engine.In("id", 1, 2, 3).Find(&users) + // SELECT * FROM user WHERE id IN (1, 2, 3) + engine.In("id", []int{1, 2, 3}).Find(&users) + // SELECT * FROM user WHERE id IN (1, 2, 3) + +2. Where, And, Or + + engine.Where().And().Or().Find() + // SELECT * FROM user WHERE (.. AND ..) OR ... + +3. OrderBy, Asc, Desc + + engine.Asc().Desc().Find() + // SELECT * FROM user ORDER BY .. ASC, .. DESC + engine.OrderBy().Find() + // SELECT * FROM user ORDER BY .. + +4. Limit, Top + + engine.Limit().Find() + // SELECT * FROM user LIMIT .. OFFSET .. + engine.Top(5).Find() + // SELECT TOP 5 * FROM user // for mssql + // SELECT * FROM user LIMIT .. OFFSET 0 //for other databases + +5. SQL, let you custom SQL + + var users []User + engine.SQL("select * from user").Find(&users) + +6. Cols, Omit, Distinct + + var users []*User + engine.Cols("col1, col2").Find(&users) + // SELECT col1, col2 FROM user + engine.Cols("col1", "col2").Where().Update(user) + // UPDATE user set col1 = ?, col2 = ? Where ... + engine.Omit("col1").Find(&users) + // SELECT col2, col3 FROM user + engine.Omit("col1").Insert(&user) + // INSERT INTO table (non-col1) VALUES () + engine.Distinct("col1").Find(&users) + // SELECT DISTINCT col1 FROM user + +7. Join, GroupBy, Having + + engine.GroupBy("name").Having("name='xlw'").Find(&users) + //SELECT * FROM user GROUP BY name HAVING name='xlw' + engine.Join("LEFT", "userdetail", "user.id=userdetail.id").Find(&users) + //SELECT * FROM user LEFT JOIN userdetail ON user.id=userdetail.id + +More usage, please visit http://xorm.io/docs +*/ +package xorm diff --git a/vendor/xorm.io/xorm/engine.go b/vendor/xorm.io/xorm/engine.go new file mode 100644 index 0000000000..4159a7b24b --- /dev/null +++ b/vendor/xorm.io/xorm/engine.go @@ -0,0 +1,1303 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "xorm.io/xorm/caches" + "xorm.io/xorm/contexts" + "xorm.io/xorm/core" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/log" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" + "xorm.io/xorm/tags" +) + +// Engine is the major struct of xorm, it means a database manager. 
+// Commonly, an application only need one engine +type Engine struct { + cacherMgr *caches.Manager + defaultContext context.Context + dialect dialects.Dialect + engineGroup *EngineGroup + logger log.ContextLogger + tagParser *tags.Parser + db *core.DB + + driverName string + dataSourceName string + + TZLocation *time.Location // The timezone of the application + DatabaseTZ *time.Location // The timezone of the database + + logSessionID bool // create session id +} + +// NewEngine new a db manager according to the parameter. Currently support four +// drivers +func NewEngine(driverName string, dataSourceName string) (*Engine, error) { + dialect, err := dialects.OpenDialect(driverName, dataSourceName) + if err != nil { + return nil, err + } + + db, err := core.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + + cacherMgr := caches.NewManager() + mapper := names.NewCacheMapper(new(names.SnakeMapper)) + tagParser := tags.NewParser("xorm", dialect, mapper, mapper, cacherMgr) + + engine := &Engine{ + dialect: dialect, + TZLocation: time.Local, + defaultContext: context.Background(), + cacherMgr: cacherMgr, + tagParser: tagParser, + driverName: driverName, + dataSourceName: dataSourceName, + db: db, + logSessionID: false, + } + + if dialect.URI().DBType == schemas.SQLITE { + engine.DatabaseTZ = time.UTC + } else { + engine.DatabaseTZ = time.Local + } + + logger := log.NewSimpleLogger(os.Stdout) + logger.SetLevel(log.LOG_INFO) + engine.SetLogger(log.NewLoggerAdapter(logger)) + + runtime.SetFinalizer(engine, func(engine *Engine) { + engine.Close() + }) + + return engine, nil +} + +// NewEngineWithParams new a db manager with params. The params will be passed to dialects. +func NewEngineWithParams(driverName string, dataSourceName string, params map[string]string) (*Engine, error) { + engine, err := NewEngine(driverName, dataSourceName) + engine.dialect.SetParams(params) + return engine, err +} + +// EnableSessionID if enable session id +func (engine *Engine) EnableSessionID(enable bool) { + engine.logSessionID = enable +} + +// SetCacher sets cacher for the table +func (engine *Engine) SetCacher(tableName string, cacher caches.Cacher) { + engine.cacherMgr.SetCacher(tableName, cacher) +} + +// GetCacher returns the cachher of the special table +func (engine *Engine) GetCacher(tableName string) caches.Cacher { + return engine.cacherMgr.GetCacher(tableName) +} + +// SetQuotePolicy sets the special quote policy +func (engine *Engine) SetQuotePolicy(quotePolicy dialects.QuotePolicy) { + engine.dialect.SetQuotePolicy(quotePolicy) +} + +// BufferSize sets buffer size for iterate +func (engine *Engine) BufferSize(size int) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.BufferSize(size) +} + +// ShowSQL show SQL statement or not on logger if log level is great than INFO +func (engine *Engine) ShowSQL(show ...bool) { + engine.logger.ShowSQL(show...) 
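Taken together with the finalizer and logger set-up above, a typical application constructs one Engine at start-up and keeps it for the process lifetime. A minimal usage sketch; the driver name and DSN are placeholders, and the matching driver must be imported for its side effects (e.g. _ "github.com/lib/pq"):

    engine, err := xorm.NewEngine("postgres", "user=app dbname=app sslmode=disable")
    if err != nil {
        return err
    }
    defer engine.Close()

    engine.ShowSQL(true)                  // log every statement while developing
    engine.SetMapper(names.SnakeMapper{}) // FieldName -> field_name columns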
+ engine.DB().Logger = engine.logger +} + +// Logger return the logger interface +func (engine *Engine) Logger() log.ContextLogger { + return engine.logger +} + +// SetLogger set the new logger +func (engine *Engine) SetLogger(logger interface{}) { + var realLogger log.ContextLogger + switch t := logger.(type) { + case log.ContextLogger: + realLogger = t + case log.Logger: + realLogger = log.NewLoggerAdapter(t) + } + engine.logger = realLogger + engine.DB().Logger = realLogger +} + +// SetLogLevel sets the logger level +func (engine *Engine) SetLogLevel(level log.LogLevel) { + engine.logger.SetLevel(level) +} + +// SetDisableGlobalCache disable global cache or not +func (engine *Engine) SetDisableGlobalCache(disable bool) { + engine.cacherMgr.SetDisableGlobalCache(disable) +} + +// DriverName return the current sql driver's name +func (engine *Engine) DriverName() string { + return engine.driverName +} + +// DataSourceName return the current connection string +func (engine *Engine) DataSourceName() string { + return engine.dataSourceName +} + +// SetMapper set the name mapping rules +func (engine *Engine) SetMapper(mapper names.Mapper) { + engine.SetTableMapper(mapper) + engine.SetColumnMapper(mapper) +} + +// SetTableMapper set the table name mapping rule +func (engine *Engine) SetTableMapper(mapper names.Mapper) { + engine.tagParser.SetTableMapper(mapper) +} + +// SetColumnMapper set the column name mapping rule +func (engine *Engine) SetColumnMapper(mapper names.Mapper) { + engine.tagParser.SetColumnMapper(mapper) +} + +// Quote Use QuoteStr quote the string sql +func (engine *Engine) Quote(value string) string { + value = strings.TrimSpace(value) + if len(value) == 0 { + return value + } + + buf := strings.Builder{} + engine.QuoteTo(&buf, value) + + return buf.String() +} + +// QuoteTo quotes string and writes into the buffer +func (engine *Engine) QuoteTo(buf *strings.Builder, value string) { + if buf == nil { + return + } + + value = strings.TrimSpace(value) + if value == "" { + return + } + engine.dialect.Quoter().QuoteTo(buf, value) +} + +// SQLType A simple wrapper to dialect's core.SqlType method +func (engine *Engine) SQLType(c *schemas.Column) string { + return engine.dialect.SQLType(c) +} + +// AutoIncrStr Database's autoincrement statement +func (engine *Engine) AutoIncrStr() string { + return engine.dialect.AutoIncrStr() +} + +// SetConnMaxLifetime sets the maximum amount of time a connection may be reused. +func (engine *Engine) SetConnMaxLifetime(d time.Duration) { + engine.DB().SetConnMaxLifetime(d) +} + +// SetMaxOpenConns is only available for go 1.2+ +func (engine *Engine) SetMaxOpenConns(conns int) { + engine.DB().SetMaxOpenConns(conns) +} + +// SetMaxIdleConns set the max idle connections on pool, default is 2 +func (engine *Engine) SetMaxIdleConns(conns int) { + engine.DB().SetMaxIdleConns(conns) +} + +// SetDefaultCacher set the default cacher. Xorm's default not enable cacher. 
+func (engine *Engine) SetDefaultCacher(cacher caches.Cacher) { + engine.cacherMgr.SetDefaultCacher(cacher) +} + +// GetDefaultCacher returns the default cacher +func (engine *Engine) GetDefaultCacher() caches.Cacher { + return engine.cacherMgr.GetDefaultCacher() +} + +// NoCache If you has set default cacher, and you want temporilly stop use cache, +// you can use NoCache() +func (engine *Engine) NoCache() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.NoCache() +} + +// NoCascade If you do not want to auto cascade load object +func (engine *Engine) NoCascade() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.NoCascade() +} + +// MapCacher Set a table use a special cacher +func (engine *Engine) MapCacher(bean interface{}, cacher caches.Cacher) error { + engine.SetCacher(dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, true), cacher) + return nil +} + +// NewDB provides an interface to operate database directly +func (engine *Engine) NewDB() (*core.DB, error) { + return core.Open(engine.driverName, engine.dataSourceName) +} + +// DB return the wrapper of sql.DB +func (engine *Engine) DB() *core.DB { + return engine.db +} + +// Dialect return database dialect +func (engine *Engine) Dialect() dialects.Dialect { + return engine.dialect +} + +// NewSession New a session +func (engine *Engine) NewSession() *Session { + return newSession(engine) +} + +// Close the engine +func (engine *Engine) Close() error { + return engine.DB().Close() +} + +// Ping tests if database is alive +func (engine *Engine) Ping() error { + session := engine.NewSession() + defer session.Close() + return session.Ping() +} + +// SQL method let's you manually write raw SQL and operate +// For example: +// +// engine.SQL("select * from user").Find(&users) +// +// This code will execute "select * from user" and set the records to users +func (engine *Engine) SQL(query interface{}, args ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.SQL(query, args...) +} + +// NoAutoTime Default if your struct has "created" or "updated" filed tag, the fields +// will automatically be filled with current time when Insert or Update +// invoked. Call NoAutoTime if you dont' want to fill automatically. +func (engine *Engine) NoAutoTime() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.NoAutoTime() +} + +// NoAutoCondition disable auto generate Where condition from bean or not +func (engine *Engine) NoAutoCondition(no ...bool) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.NoAutoCondition(no...) 
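Every convenience method above follows the same shape: open a Session, mark it isAutoClose, and delegate. The practical consequence is that a chained call is a one-shot session, while multi-statement work (or a transaction) needs an explicit one. A sketch of both styles, with User as a hypothetical mapped struct:

    // one-shot: Find closes the implicit session when it returns
    var users []User
    err := engine.Where("age > ?", 18).Limit(10).Find(&users)

    // explicit session: reused across statements and closed by the caller
    sess := engine.NewSession()
    defer sess.Close()
    if err := sess.Begin(); err != nil {
        return err
    }
    if _, err := sess.Insert(&User{Name: "xlw"}); err != nil {
        sess.Rollback()
        return err
    }
    return sess.Commit()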
+} + +func (engine *Engine) loadTableInfo(table *schemas.Table) error { + colSeq, cols, err := engine.dialect.GetColumns(engine.db, engine.defaultContext, table.Name) + if err != nil { + return err + } + for _, name := range colSeq { + table.AddColumn(cols[name]) + } + indexes, err := engine.dialect.GetIndexes(engine.db, engine.defaultContext, table.Name) + if err != nil { + return err + } + table.Indexes = indexes + + var seq int + for _, index := range indexes { + for _, name := range index.Cols { + parts := strings.Split(name, " ") + if len(parts) > 1 { + if parts[1] == "DESC" { + seq = 1 + } + } + if col := table.GetColumn(parts[0]); col != nil { + col.Indexes[index.Name] = index.Type + } else { + return fmt.Errorf("Unknown col %s seq %d, in index %v of table %v, columns %v", name, seq, index.Name, table.Name, table.ColumnsSeq()) + } + } + } + return nil +} + +// DBMetas Retrieve all tables, columns, indexes' informations from database. +func (engine *Engine) DBMetas() ([]*schemas.Table, error) { + tables, err := engine.dialect.GetTables(engine.db, engine.defaultContext) + if err != nil { + return nil, err + } + + for _, table := range tables { + if err = engine.loadTableInfo(table); err != nil { + return nil, err + } + } + return tables, nil +} + +// DumpAllToFile dump database all table structs and data to a file +func (engine *Engine) DumpAllToFile(fp string, tp ...schemas.DBType) error { + f, err := os.Create(fp) + if err != nil { + return err + } + defer f.Close() + return engine.DumpAll(f, tp...) +} + +// DumpAll dump database all table structs and data to w +func (engine *Engine) DumpAll(w io.Writer, tp ...schemas.DBType) error { + tables, err := engine.DBMetas() + if err != nil { + return err + } + return engine.DumpTables(tables, w, tp...) +} + +// DumpTablesToFile dump specified tables to SQL file. +func (engine *Engine) DumpTablesToFile(tables []*schemas.Table, fp string, tp ...schemas.DBType) error { + f, err := os.Create(fp) + if err != nil { + return err + } + defer f.Close() + return engine.DumpTables(tables, f, tp...) +} + +// DumpTables dump specify tables to io.Writer +func (engine *Engine) DumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { + return engine.dumpTables(tables, w, tp...) 
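DBMetas and the dump helpers above compose naturally: read the live schema, then serialise structure and data, optionally translated to another database's syntax. A usage sketch (the file name is arbitrary):

    tables, err := engine.DBMetas()
    if err != nil {
        return err
    }
    // emit CREATE TABLE / INSERT statements in postgres form
    if err := engine.DumpTablesToFile(tables, "backup.sql", schemas.POSTGRES); err != nil {
        return err
    }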
+} + +func formatColumnValue(dstDialect dialects.Dialect, d interface{}, col *schemas.Column) string { + if d == nil { + return "NULL" + } + + if dq, ok := d.(bool); ok && (dstDialect.URI().DBType == schemas.SQLITE || + dstDialect.URI().DBType == schemas.MSSQL) { + if dq { + return "1" + } + return "0" + } + + if col.SQLType.IsText() { + var v = fmt.Sprintf("%s", d) + return "'" + strings.Replace(v, "'", "''", -1) + "'" + } else if col.SQLType.IsTime() { + var v = fmt.Sprintf("%s", d) + if strings.HasSuffix(v, " +0000 UTC") { + return fmt.Sprintf("'%s'", v[0:len(v)-len(" +0000 UTC")]) + } else if strings.HasSuffix(v, " +0000 +0000") { + return fmt.Sprintf("'%s'", v[0:len(v)-len(" +0000 +0000")]) + } + return "'" + strings.Replace(v, "'", "''", -1) + "'" + } else if col.SQLType.IsBlob() { + if reflect.TypeOf(d).Kind() == reflect.Slice { + return fmt.Sprintf("%s", dstDialect.FormatBytes(d.([]byte))) + } else if reflect.TypeOf(d).Kind() == reflect.String { + return fmt.Sprintf("'%s'", d.(string)) + } + } else if col.SQLType.IsNumeric() { + switch reflect.TypeOf(d).Kind() { + case reflect.Slice: + if col.SQLType.Name == schemas.Bool { + return fmt.Sprintf("%v", strconv.FormatBool(d.([]byte)[0] != byte('0'))) + } + return fmt.Sprintf("%s", string(d.([]byte))) + case reflect.Int16, reflect.Int8, reflect.Int32, reflect.Int64, reflect.Int: + if col.SQLType.Name == schemas.Bool { + v := reflect.ValueOf(d).Int() > 0 + if dstDialect.URI().DBType == schemas.SQLITE { + if v { + return "1" + } + return "0" + } + return fmt.Sprintf("%v", strconv.FormatBool(v)) + } + return fmt.Sprintf("%v", d) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if col.SQLType.Name == schemas.Bool { + v := reflect.ValueOf(d).Uint() > 0 + if dstDialect.URI().DBType == schemas.SQLITE { + if v { + return "1" + } + return "0" + } + return fmt.Sprintf("%v", strconv.FormatBool(v)) + } + return fmt.Sprintf("%v", d) + default: + return fmt.Sprintf("%v", d) + } + } + + s := fmt.Sprintf("%v", d) + if strings.Contains(s, ":") || strings.Contains(s, "-") { + if strings.HasSuffix(s, " +0000 UTC") { + return fmt.Sprintf("'%s'", s[0:len(s)-len(" +0000 UTC")]) + } + return fmt.Sprintf("'%s'", s) + } + return s +} + +// dumpTables dump database all table structs and data to w with specify db type +func (engine *Engine) dumpTables(tables []*schemas.Table, w io.Writer, tp ...schemas.DBType) error { + var dstDialect dialects.Dialect + if len(tp) == 0 { + dstDialect = engine.dialect + } else { + dstDialect = dialects.QueryDialect(tp[0]) + if dstDialect == nil { + return errors.New("Unsupported database type") + } + + uri := engine.dialect.URI() + destURI := dialects.URI{ + DBType: tp[0], + DBName: uri.DBName, + } + dstDialect.Init(&destURI) + } + + _, err := io.WriteString(w, fmt.Sprintf("/*Generated by xorm %s, from %s to %s*/\n\n", + time.Now().In(engine.TZLocation).Format("2006-01-02 15:04:05"), engine.dialect.URI().DBType, dstDialect.URI().DBType)) + if err != nil { + return err + } + + for i, table := range tables { + tableName := table.Name + if dstDialect.URI().Schema != "" { + tableName = fmt.Sprintf("%s.%s", dstDialect.URI().Schema, table.Name) + } + originalTableName := table.Name + if engine.dialect.URI().Schema != "" { + originalTableName = fmt.Sprintf("%s.%s", engine.dialect.URI().Schema, table.Name) + } + if i > 0 { + _, err = io.WriteString(w, "\n") + if err != nil { + return err + } + } + sqls, _ := dstDialect.CreateTableSQL(table, tableName) + for _, s := range sqls { + _, err = 
io.WriteString(w, s+";\n") + if err != nil { + return err + } + } + if len(table.PKColumns()) > 0 && dstDialect.URI().DBType == schemas.MSSQL { + fmt.Fprintf(w, "SET IDENTITY_INSERT [%s] ON;\n", table.Name) + } + + for _, index := range table.Indexes { + _, err = io.WriteString(w, dstDialect.CreateIndexSQL(table.Name, index)+";\n") + if err != nil { + return err + } + } + + cols := table.ColumnsSeq() + colNames := engine.dialect.Quoter().Join(cols, ", ") + destColNames := dstDialect.Quoter().Join(cols, ", ") + + rows, err := engine.DB().QueryContext(engine.defaultContext, "SELECT "+colNames+" FROM "+engine.Quote(originalTableName)) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + dest := make([]interface{}, len(cols)) + err = rows.ScanSlice(&dest) + if err != nil { + return err + } + + _, err = io.WriteString(w, "INSERT INTO "+dstDialect.Quoter().Quote(tableName)+" ("+destColNames+") VALUES (") + if err != nil { + return err + } + + var temp string + for i, d := range dest { + col := table.GetColumn(cols[i]) + if col == nil { + return errors.New("unknow column error") + } + temp += "," + formatColumnValue(dstDialect, d, col) + } + _, err = io.WriteString(w, temp[1:]+");\n") + if err != nil { + return err + } + } + + // FIXME: Hack for postgres + if dstDialect.URI().DBType == schemas.POSTGRES && table.AutoIncrColumn() != nil { + _, err = io.WriteString(w, "SELECT setval('"+tableName+"_id_seq', COALESCE((SELECT MAX("+table.AutoIncrColumn().Name+") + 1 FROM "+dstDialect.Quoter().Quote(tableName)+"), 1), false);\n") + if err != nil { + return err + } + } + } + return nil +} + +// Cascade use cascade or not +func (engine *Engine) Cascade(trueOrFalse ...bool) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Cascade(trueOrFalse...) +} + +// Where method provide a condition query +func (engine *Engine) Where(query interface{}, args ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Where(query, args...) +} + +// ID method provoide a condition as (id) = ? +func (engine *Engine) ID(id interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.ID(id) +} + +// Before apply before Processor, affected bean is passed to closure arg +func (engine *Engine) Before(closures func(interface{})) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Before(closures) +} + +// After apply after insert Processor, affected bean is passed to closure arg +func (engine *Engine) After(closures func(interface{})) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.After(closures) +} + +// Charset set charset when create table, only support mysql now +func (engine *Engine) Charset(charset string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Charset(charset) +} + +// StoreEngine set store engine when create table, only support mysql now +func (engine *Engine) StoreEngine(storeEngine string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.StoreEngine(storeEngine) +} + +// Distinct use for distinct columns. Caution: when you are using cache, +// distinct will not be cached because cache system need id, +// but distinct will not provide id +func (engine *Engine) Distinct(columns ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Distinct(columns...) 
+} + +// Select customerize your select columns or contents +func (engine *Engine) Select(str string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Select(str) +} + +// Cols only use the parameters as select or update columns +func (engine *Engine) Cols(columns ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Cols(columns...) +} + +// AllCols indicates that all columns should be use +func (engine *Engine) AllCols() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.AllCols() +} + +// MustCols specify some columns must use even if they are empty +func (engine *Engine) MustCols(columns ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.MustCols(columns...) +} + +// UseBool xorm automatically retrieve condition according struct, but +// if struct has bool field, it will ignore them. So use UseBool +// to tell system to do not ignore them. +// If no parameters, it will use all the bool field of struct, or +// it will use parameters's columns +func (engine *Engine) UseBool(columns ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.UseBool(columns...) +} + +// Omit only not use the parameters as select or update columns +func (engine *Engine) Omit(columns ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Omit(columns...) +} + +// Nullable set null when column is zero-value and nullable for update +func (engine *Engine) Nullable(columns ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Nullable(columns...) +} + +// In will generate "column IN (?, ?)" +func (engine *Engine) In(column string, args ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.In(column, args...) +} + +// NotIn will generate "column NOT IN (?, ?)" +func (engine *Engine) NotIn(column string, args ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.NotIn(column, args...) +} + +// Incr provides a update string like "column = column + ?" +func (engine *Engine) Incr(column string, arg ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Incr(column, arg...) +} + +// Decr provides a update string like "column = column - ?" +func (engine *Engine) Decr(column string, arg ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Decr(column, arg...) 
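Incr and Decr (with SetExpr just below) let an update adjust a column relative to its current value rather than overwriting it, while the condition still comes from ID or Where. A sketch against a hypothetical User struct:

    // UPDATE user SET login_count = login_count + 1 WHERE id = 1
    affected, err := engine.ID(1).Incr("login_count").Update(new(User))

    // UPDATE user SET score = score - 5 WHERE name = 'xlw'
    affected, err = engine.Where("name = ?", "xlw").Decr("score", 5).Update(new(User))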
+} + +// SetExpr provides a update string like "column = {expression}" +func (engine *Engine) SetExpr(column string, expression interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.SetExpr(column, expression) +} + +// Table temporarily change the Get, Find, Update's table +func (engine *Engine) Table(tableNameOrBean interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Table(tableNameOrBean) +} + +// Alias set the table alias +func (engine *Engine) Alias(alias string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Alias(alias) +} + +// Limit will generate "LIMIT start, limit" +func (engine *Engine) Limit(limit int, start ...int) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Limit(limit, start...) +} + +// Desc will generate "ORDER BY column1 DESC, column2 DESC" +func (engine *Engine) Desc(colNames ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Desc(colNames...) +} + +// Asc will generate "ORDER BY column1,column2 Asc" +// This method can chainable use. +// +// engine.Desc("name").Asc("age").Find(&users) +// // SELECT * FROM user ORDER BY name DESC, age ASC +// +func (engine *Engine) Asc(colNames ...string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Asc(colNames...) +} + +// OrderBy will generate "ORDER BY order" +func (engine *Engine) OrderBy(order string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.OrderBy(order) +} + +// Prepare enables prepare statement +func (engine *Engine) Prepare() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Prepare() +} + +// Join the join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN +func (engine *Engine) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Join(joinOperator, tablename, condition, args...) 
+} + +// GroupBy generate group by statement +func (engine *Engine) GroupBy(keys string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.GroupBy(keys) +} + +// Having generate having statement +func (engine *Engine) Having(conditions string) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Having(conditions) +} + +// Table table struct +type Table struct { + *schemas.Table + Name string +} + +// IsValid if table is valid +func (t *Table) IsValid() bool { + return t.Table != nil && len(t.Name) > 0 +} + +// TableInfo get table info according to bean's content +func (engine *Engine) TableInfo(bean interface{}) (*schemas.Table, error) { + v := utils.ReflectValue(bean) + return engine.tagParser.ParseWithCache(v) +} + +// IsTableEmpty if a table has any reocrd +func (engine *Engine) IsTableEmpty(bean interface{}) (bool, error) { + session := engine.NewSession() + defer session.Close() + return session.IsTableEmpty(bean) +} + +// IsTableExist if a table is exist +func (engine *Engine) IsTableExist(beanOrTableName interface{}) (bool, error) { + session := engine.NewSession() + defer session.Close() + return session.IsTableExist(beanOrTableName) +} + +// TableName returns table name with schema prefix if has +func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string { + return dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean, includeSchema...) +} + +// CreateIndexes create indexes +func (engine *Engine) CreateIndexes(bean interface{}) error { + session := engine.NewSession() + defer session.Close() + return session.CreateIndexes(bean) +} + +// CreateUniques create uniques +func (engine *Engine) CreateUniques(bean interface{}) error { + session := engine.NewSession() + defer session.Close() + return session.CreateUniques(bean) +} + +// ClearCacheBean if enabled cache, clear the cache bean +func (engine *Engine) ClearCacheBean(bean interface{}, id string) error { + tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) + cacher := engine.GetCacher(tableName) + if cacher != nil { + cacher.ClearIds(tableName) + cacher.DelBean(tableName, id) + } + return nil +} + +// ClearCache if enabled cache, clear some tables' cache +func (engine *Engine) ClearCache(beans ...interface{}) error { + for _, bean := range beans { + tableName := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean) + cacher := engine.GetCacher(tableName) + if cacher != nil { + cacher.ClearIds(tableName) + cacher.ClearBeans(tableName) + } + } + return nil +} + +// UnMapType remove table from tables cache +func (engine *Engine) UnMapType(t reflect.Type) { + engine.tagParser.ClearCacheTable(t) +} + +// Sync the new struct changes to database, this method will automatically add +// table, column, index, unique. but will not delete or change anything. +// If you change some field, you should change the database manually. 
+func (engine *Engine) Sync(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	for _, bean := range beans {
+		v := utils.ReflectValue(bean)
+		tableNameNoSchema := dialects.FullTableName(engine.dialect, engine.GetTableMapper(), bean)
+		table, err := engine.tagParser.ParseWithCache(v)
+		if err != nil {
+			return err
+		}
+
+		isExist, err := session.Table(bean).isTableExist(tableNameNoSchema)
+		if err != nil {
+			return err
+		}
+		if !isExist {
+			err = session.createTable(bean)
+			if err != nil {
+				return err
+			}
+		}
+		/*isEmpty, err := engine.IsEmptyTable(bean)
+		if err != nil {
+			return err
+		}*/
+		var isEmpty bool
+		if isEmpty {
+			err = session.dropTable(bean)
+			if err != nil {
+				return err
+			}
+			err = session.createTable(bean)
+			if err != nil {
+				return err
+			}
+		} else {
+			for _, col := range table.Columns() {
+				isExist, err := engine.dialect.IsColumnExist(engine.db, session.ctx, tableNameNoSchema, col.Name)
+				if err != nil {
+					return err
+				}
+				if !isExist {
+					if err := session.statement.SetRefBean(bean); err != nil {
+						return err
+					}
+					err = session.addColumn(col.Name)
+					if err != nil {
+						return err
+					}
+				}
+			}
+
+			for name, index := range table.Indexes {
+				if err := session.statement.SetRefBean(bean); err != nil {
+					return err
+				}
+				if index.Type == schemas.UniqueType {
+					isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, true)
+					if err != nil {
+						return err
+					}
+					if !isExist {
+						if err := session.statement.SetRefBean(bean); err != nil {
+							return err
+						}
+
+						err = session.addUnique(tableNameNoSchema, name)
+						if err != nil {
+							return err
+						}
+					}
+				} else if index.Type == schemas.IndexType {
+					isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, false)
+					if err != nil {
+						return err
+					}
+					if !isExist {
+						if err := session.statement.SetRefBean(bean); err != nil {
+							return err
+						}
+
+						err = session.addIndex(tableNameNoSchema, name)
+						if err != nil {
+							return err
+						}
+					}
+				} else {
+					return errors.New("unknown index type")
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Sync2 synchronizes structs to database tables
+func (engine *Engine) Sync2(beans ...interface{}) error {
+	s := engine.NewSession()
+	defer s.Close()
+	return s.Sync2(beans...)
+}
+
+// CreateTables creates tables according to the given beans
+func (engine *Engine) CreateTables(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	err := session.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, bean := range beans {
+		err = session.createTable(bean)
+		if err != nil {
+			session.Rollback()
+			return err
+		}
+	}
+	return session.Commit()
+}
+
+// DropTables drops the specified tables
+func (engine *Engine) DropTables(beans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+
+	err := session.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, bean := range beans {
+		err = session.dropTable(bean)
+		if err != nil {
+			session.Rollback()
+			return err
+		}
+	}
+	return session.Commit()
+}
+
+// DropIndexes drops the indexes of a table
+func (engine *Engine) DropIndexes(bean interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.DropIndexes(bean)
+}
+
+// Exec executes a raw SQL statement
+func (engine *Engine) Exec(sqlOrArgs ...interface{}) (sql.Result, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Exec(sqlOrArgs...)
+}
+
+// Query runs a raw SQL query and returns records as []map[string][]byte
+func (engine *Engine) Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Query(sqlOrArgs...)
+}
+
+// QueryString runs a raw SQL query and returns records as []map[string]string
+func (engine *Engine) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.QueryString(sqlOrArgs...)
+}
+
+// QueryInterface runs a raw SQL query and returns records as []map[string]interface{}
+func (engine *Engine) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.QueryInterface(sqlOrArgs...)
+}
+
+// Insert inserts one or more records
+func (engine *Engine) Insert(beans ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Insert(beans...)
+}
+
+// InsertOne inserts exactly one record
+func (engine *Engine) InsertOne(bean interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.InsertOne(bean)
+}
+
+// Update updates records; the bean's non-empty fields are the updated contents,
+// and condiBean's non-empty fields are the conditions.
+// CAUTION:
+//   1. By default, bool fields are treated as updated content, not as conditions;
+//      call UseBool if you need bool fields to be used.
+//   2. float32 & float64 values may be inexact when used as conditions.
+func (engine *Engine) Update(bean interface{}, condiBeans ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Update(bean, condiBeans...)
+}
+
+// Delete deletes records; the bean's non-empty fields are the conditions
+func (engine *Engine) Delete(bean interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Delete(bean)
+}
+
+// Get retrieves one record from the table; the bean's non-empty fields
+// are the conditions
+func (engine *Engine) Get(bean interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Get(bean)
+}
+
+// Exist returns true if the record exists, otherwise false
+func (engine *Engine) Exist(bean ...interface{}) (bool, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Exist(bean...)
+}
+
+// Find retrieves records from the table; condiBeans's non-empty fields
+// are the conditions. beans could be []Struct, []*Struct, map[int64]Struct
+// or map[int64]*Struct
+func (engine *Engine) Find(beans interface{}, condiBeans ...interface{}) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Find(beans, condiBeans...)
+}
+
+// FindAndCount finds the results and also returns the count
+func (engine *Engine) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.FindAndCount(rowsSlicePtr, condiBean...)
+}
+
+// Iterate handles records from the table one by one; the bean's non-empty
+// fields are the conditions.
+func (engine *Engine) Iterate(bean interface{}, fun IterFunc) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.Iterate(bean, fun)
+}
+
+// Rows returns an sql.Rows-compatible Rows object, a forward-only iterator
+// for iterating record by record; the bean's non-empty fields are the conditions.
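+//
+// A hypothetical usage sketch (User is an illustrative bean type):
+//
+//	rows, err := engine.Rows(&User{Name: "alice"})
+//	if err != nil {
+//		return err
+//	}
+//	defer rows.Close()
+//	for rows.Next() {
+//		var u User
+//		if err := rows.Scan(&u); err != nil {
+//			return err
+//		}
+//	}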
+func (engine *Engine) Rows(bean interface{}) (*Rows, error) { + session := engine.NewSession() + return session.Rows(bean) +} + +// Count counts the records. bean's non-empty fields are conditions. +func (engine *Engine) Count(bean ...interface{}) (int64, error) { + session := engine.NewSession() + defer session.Close() + return session.Count(bean...) +} + +// Sum sum the records by some column. bean's non-empty fields are conditions. +func (engine *Engine) Sum(bean interface{}, colName string) (float64, error) { + session := engine.NewSession() + defer session.Close() + return session.Sum(bean, colName) +} + +// SumInt sum the records by some column. bean's non-empty fields are conditions. +func (engine *Engine) SumInt(bean interface{}, colName string) (int64, error) { + session := engine.NewSession() + defer session.Close() + return session.SumInt(bean, colName) +} + +// Sums sum the records by some columns. bean's non-empty fields are conditions. +func (engine *Engine) Sums(bean interface{}, colNames ...string) ([]float64, error) { + session := engine.NewSession() + defer session.Close() + return session.Sums(bean, colNames...) +} + +// SumsInt like Sums but return slice of int64 instead of float64. +func (engine *Engine) SumsInt(bean interface{}, colNames ...string) ([]int64, error) { + session := engine.NewSession() + defer session.Close() + return session.SumsInt(bean, colNames...) +} + +// ImportFile SQL DDL file +func (engine *Engine) ImportFile(ddlPath string) ([]sql.Result, error) { + session := engine.NewSession() + defer session.Close() + return session.ImportFile(ddlPath) +} + +// Import SQL DDL from io.Reader +func (engine *Engine) Import(r io.Reader) ([]sql.Result, error) { + session := engine.NewSession() + defer session.Close() + return session.Import(r) +} + +// nowTime return current time +func (engine *Engine) nowTime(col *schemas.Column) (interface{}, time.Time) { + t := time.Now() + var tz = engine.DatabaseTZ + if !col.DisableTimeZone && col.TimeZone != nil { + tz = col.TimeZone + } + return dialects.FormatTime(engine.dialect, col.SQLType.Name, t.In(tz)), t.In(engine.TZLocation) +} + +// GetColumnMapper returns the column name mapper +func (engine *Engine) GetColumnMapper() names.Mapper { + return engine.tagParser.GetColumnMapper() +} + +// GetTableMapper returns the table name mapper +func (engine *Engine) GetTableMapper() names.Mapper { + return engine.tagParser.GetTableMapper() +} + +// GetTZLocation returns time zone of the application +func (engine *Engine) GetTZLocation() *time.Location { + return engine.TZLocation +} + +// SetTZLocation sets time zone of the application +func (engine *Engine) SetTZLocation(tz *time.Location) { + engine.TZLocation = tz +} + +// GetTZDatabase returns time zone of the database +func (engine *Engine) GetTZDatabase() *time.Location { + return engine.DatabaseTZ +} + +// SetTZDatabase sets time zone of the database +func (engine *Engine) SetTZDatabase(tz *time.Location) { + engine.DatabaseTZ = tz +} + +// SetSchema sets the schema of database +func (engine *Engine) SetSchema(schema string) { + engine.dialect.URI().SetSchema(schema) +} + +func (engine *Engine) AddHook(hook contexts.Hook) { + engine.db.AddHook(hook) +} + +// Unscoped always disable struct tag "deleted" +func (engine *Engine) Unscoped() *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Unscoped() +} + +func (engine *Engine) tbNameWithSchema(v string) string { + return dialects.TableNameWithSchema(engine.dialect, v) +} + +// 
Context creates a session with the given context
+func (engine *Engine) Context(ctx context.Context) *Session {
+	session := engine.NewSession()
+	session.isAutoClose = true
+	return session.Context(ctx)
+}
+
+// SetDefaultContext sets the default context
+func (engine *Engine) SetDefaultContext(ctx context.Context) {
+	engine.defaultContext = ctx
+}
+
+// PingContext tests if the database is alive
+func (engine *Engine) PingContext(ctx context.Context) error {
+	session := engine.NewSession()
+	defer session.Close()
+	return session.PingContext(ctx)
+}
+
+// Transaction executes SQL wrapped in a transaction (abbreviated as tx); the tx
+// is committed automatically if no error occurred
+func (engine *Engine) Transaction(f func(*Session) (interface{}, error)) (interface{}, error) {
+	session := engine.NewSession()
+	defer session.Close()
+
+	if err := session.Begin(); err != nil {
+		return nil, err
+	}
+
+	result, err := f(session)
+	if err != nil {
+		return result, err
+	}
+
+	if err := session.Commit(); err != nil {
+		return result, err
+	}
+
+	return result, nil
+}
diff --git a/vendor/xorm.io/xorm/engine_group.go b/vendor/xorm.io/xorm/engine_group.go
new file mode 100644
index 0000000000..cdd9dd445a
--- /dev/null
+++ b/vendor/xorm.io/xorm/engine_group.go
@@ -0,0 +1,230 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+	"context"
+	"time"
+
+	"xorm.io/xorm/caches"
+	"xorm.io/xorm/contexts"
+	"xorm.io/xorm/dialects"
+	"xorm.io/xorm/log"
+	"xorm.io/xorm/names"
+)
+
+// EngineGroup defines an engine group
+type EngineGroup struct {
+	*Engine
+	slaves []*Engine
+	policy GroupPolicy
+}
+
+// NewEngineGroup creates a new engine group
+func NewEngineGroup(args1 interface{}, args2 interface{}, policies ...GroupPolicy) (*EngineGroup, error) {
+	var eg EngineGroup
+	if len(policies) > 0 {
+		eg.policy = policies[0]
+	} else {
+		eg.policy = RoundRobinPolicy()
+	}
+
+	driverName, ok1 := args1.(string)
+	conns, ok2 := args2.([]string)
+	if ok1 && ok2 {
+		engines := make([]*Engine, len(conns))
+		for i, conn := range conns {
+			engine, err := NewEngine(driverName, conn)
+			if err != nil {
+				return nil, err
+			}
+			engine.engineGroup = &eg
+			engines[i] = engine
+		}
+
+		eg.Engine = engines[0]
+		eg.slaves = engines[1:]
+		return &eg, nil
+	}
+
+	master, ok3 := args1.(*Engine)
+	slaves, ok4 := args2.([]*Engine)
+	if ok3 && ok4 {
+		master.engineGroup = &eg
+		for i := 0; i < len(slaves); i++ {
+			slaves[i].engineGroup = &eg
+		}
+		eg.Engine = master
+		eg.slaves = slaves
+		return &eg, nil
+	}
+	return nil, ErrParamsType
+}
+
+// Close closes the master and all slave engines
+func (eg *EngineGroup) Close() error {
+	err := eg.Engine.Close()
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < len(eg.slaves); i++ {
+		err := eg.slaves[i].Close()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Context returns a group session with the given context
+func (eg *EngineGroup) Context(ctx context.Context) *Session {
+	sess := eg.NewSession()
+	sess.isAutoClose = true
+	return sess.Context(ctx)
+}
+
+// NewSession returns a group session
+func (eg *EngineGroup) NewSession() *Session {
+	sess := eg.Engine.NewSession()
+	sess.sessionType = groupSession
+	return sess
+}
+
+// Master returns the master engine
+func (eg *EngineGroup) Master() *Engine {
+	return eg.Engine
+}
+
+// Ping tests if the databases are alive
+func (eg *EngineGroup) Ping() error {
+	if err := eg.Engine.Ping(); err != nil {
+		return err
+	}
+
+	for _, slave := range eg.slaves {
+		if err := slave.Ping(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SetColumnMapper sets the column name mapping rule
+func (eg *EngineGroup) SetColumnMapper(mapper names.Mapper) {
+	eg.Engine.SetColumnMapper(mapper)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetColumnMapper(mapper)
+	}
+}
+
+// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
+func (eg *EngineGroup) SetConnMaxLifetime(d time.Duration) {
+	eg.Engine.SetConnMaxLifetime(d)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetConnMaxLifetime(d)
+	}
+}
+
+// SetDefaultCacher sets the default cacher
+func (eg *EngineGroup) SetDefaultCacher(cacher caches.Cacher) {
+	eg.Engine.SetDefaultCacher(cacher)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetDefaultCacher(cacher)
+	}
+}
+
+// SetLogger sets the new logger
+func (eg *EngineGroup) SetLogger(logger interface{}) {
+	eg.Engine.SetLogger(logger)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetLogger(logger)
+	}
+}
+
+func (eg *EngineGroup) AddHook(hook contexts.Hook) {
+	eg.Engine.AddHook(hook)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].AddHook(hook)
+	}
+}
+
+// SetLogLevel sets the logger level
+func (eg *EngineGroup) SetLogLevel(level log.LogLevel) {
+	eg.Engine.SetLogLevel(level)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetLogLevel(level)
+	}
+}
+
+// SetMapper sets the name mapping rules
+func (eg *EngineGroup) SetMapper(mapper names.Mapper) {
+	eg.Engine.SetMapper(mapper)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetMapper(mapper)
+	}
+}
+
+// SetMaxIdleConns sets the max idle connections on the pool, default is 2
+func (eg *EngineGroup) SetMaxIdleConns(conns int) {
+	eg.Engine.DB().SetMaxIdleConns(conns)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].DB().SetMaxIdleConns(conns)
+	}
+}
+
+// SetMaxOpenConns sets the max open connections (only available for Go 1.2+)
+func (eg *EngineGroup) SetMaxOpenConns(conns int) {
+	eg.Engine.DB().SetMaxOpenConns(conns)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].DB().SetMaxOpenConns(conns)
+	}
+}
+
+// SetPolicy sets the group policy
+func (eg *EngineGroup) SetPolicy(policy GroupPolicy) *EngineGroup {
+	eg.policy = policy
+	return eg
+}
+
+// SetQuotePolicy sets the special quote policy
+func (eg *EngineGroup) SetQuotePolicy(quotePolicy dialects.QuotePolicy) {
+	eg.Engine.SetQuotePolicy(quotePolicy)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetQuotePolicy(quotePolicy)
+	}
+}
+
+// SetTableMapper sets the table name mapping rule
+func (eg *EngineGroup) SetTableMapper(mapper names.Mapper) {
+	eg.Engine.SetTableMapper(mapper)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].SetTableMapper(mapper)
+	}
+}
+
+// ShowSQL controls whether SQL statements are shown on the logger if the log
+// level is greater than INFO
+func (eg *EngineGroup) ShowSQL(show ...bool) {
+	eg.Engine.ShowSQL(show...)
+	for i := 0; i < len(eg.slaves); i++ {
+		eg.slaves[i].ShowSQL(show...)
+	}
+}
+
+// Slave returns one of the physical databases which is a slave according to the policy
+func (eg *EngineGroup) Slave() *Engine {
+	switch len(eg.slaves) {
+	case 0:
+		return eg.Engine
+	case 1:
+		return eg.slaves[0]
+	}
+	return eg.policy.Slave(eg)
+}
+
+// Slaves returns all the slaves
+func (eg *EngineGroup) Slaves() []*Engine {
+	return eg.slaves
+}
diff --git a/vendor/xorm.io/xorm/engine_group_policy.go b/vendor/xorm.io/xorm/engine_group_policy.go
new file mode 100644
index 0000000000..1def8ce4c8
--- /dev/null
+++ b/vendor/xorm.io/xorm/engine_group_policy.go
@@ -0,0 +1,118 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// GroupPolicy is used for choosing the current slave from the slaves
+type GroupPolicy interface {
+	Slave(*EngineGroup) *Engine
+}
+
+// GroupPolicyHandler should be used when a function is a GroupPolicy
+type GroupPolicyHandler func(*EngineGroup) *Engine
+
+// Slave implements the choosing of slaves
+func (h GroupPolicyHandler) Slave(eg *EngineGroup) *Engine {
+	return h(eg)
+}
+
+// RandomPolicy implements randomly choosing a slave from the slaves
+func RandomPolicy() GroupPolicyHandler {
+	var r = rand.New(rand.NewSource(time.Now().UnixNano()))
+	return func(g *EngineGroup) *Engine {
+		return g.Slaves()[r.Intn(len(g.Slaves()))]
+	}
+}
+
+// WeightRandomPolicy implements weighted random choosing of a slave
+func WeightRandomPolicy(weights []int) GroupPolicyHandler {
+	var rands = make([]int, 0, len(weights))
+	for i := 0; i < len(weights); i++ {
+		for n := 0; n < weights[i]; n++ {
+			rands = append(rands, i)
+		}
+	}
+	var r = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+		idx := rands[r.Intn(len(rands))]
+		if idx >= len(slaves) {
+			idx = len(slaves) - 1
+		}
+		return slaves[idx]
+	}
+}
+
+// RoundRobinPolicy returns a group policy handler that chooses slaves in round-robin order
+func RoundRobinPolicy() GroupPolicyHandler {
+	var pos = -1
+	var lock sync.Mutex
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+
+		lock.Lock()
+		defer lock.Unlock()
+		pos++
+		if pos >= len(slaves) {
+			pos = 0
+		}
+
+		return slaves[pos]
+	}
+}
+
+// WeightRoundRobinPolicy returns a group policy handler that chooses slaves in weighted round-robin order
+func WeightRoundRobinPolicy(weights []int) GroupPolicyHandler {
+	var rands = make([]int, 0, len(weights))
+	for i := 0; i < len(weights); i++ {
+		for n := 0; n < weights[i]; n++ {
+			rands = append(rands, i)
+		}
+	}
+	var pos = -1
+	var lock sync.Mutex
+
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+		lock.Lock()
+		defer lock.Unlock()
+		pos++
+		if pos >= len(rands) {
+			pos = 0
+		}
+
+		idx := rands[pos]
+		if idx >= len(slaves) {
+			idx = len(slaves) - 1
+		}
+		return slaves[idx]
+	}
+}
+
+// LeastConnPolicy implements GroupPolicy; it always chooses the slave with the fewest open connections
+func LeastConnPolicy() GroupPolicyHandler {
+	return func(g *EngineGroup) *Engine {
+		var slaves = g.Slaves()
+		connections := 0
+		idx := 0
+		for i := 0; i < len(slaves); i++ {
+			openConnections := slaves[i].DB().Stats().OpenConnections
+			if i == 0 {
+				connections = openConnections
+				idx = i
+			} else if openConnections <= connections {
+				connections = openConnections
+				idx = i
+			}
+		}
+		return slaves[idx]
+	}
}
diff --git a/vendor/xorm.io/xorm/error.go b/vendor/xorm.io/xorm/error.go
new file mode 100644
index 0000000000..cfa5c81933
--- /dev/null
+++ 
b/vendor/xorm.io/xorm/error.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "errors" +) + +var ( + // ErrPtrSliceType represents a type error + ErrPtrSliceType = errors.New("A point to a slice is needed") + // ErrParamsType params error + ErrParamsType = errors.New("Params type error") + // ErrTableNotFound table not found error + ErrTableNotFound = errors.New("Table not found") + // ErrUnSupportedType unsupported error + ErrUnSupportedType = errors.New("Unsupported type error") + // ErrNotExist record does not exist error + ErrNotExist = errors.New("Record does not exist") + // ErrCacheFailed cache failed error + ErrCacheFailed = errors.New("Cache failed") + // ErrConditionType condition type unsupported + ErrConditionType = errors.New("Unsupported condition type") +) diff --git a/vendor/xorm.io/xorm/interface.go b/vendor/xorm.io/xorm/interface.go new file mode 100644 index 0000000000..6aac4ae843 --- /dev/null +++ b/vendor/xorm.io/xorm/interface.go @@ -0,0 +1,129 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "context" + "database/sql" + "reflect" + "time" + + "xorm.io/xorm/caches" + "xorm.io/xorm/contexts" + "xorm.io/xorm/dialects" + "xorm.io/xorm/log" + "xorm.io/xorm/names" + "xorm.io/xorm/schemas" +) + +// Interface defines the interface which Engine, EngineGroup and Session will implementate. +type Interface interface { + AllCols() *Session + Alias(alias string) *Session + Asc(colNames ...string) *Session + BufferSize(size int) *Session + Cols(columns ...string) *Session + Count(...interface{}) (int64, error) + CreateIndexes(bean interface{}) error + CreateUniques(bean interface{}) error + Decr(column string, arg ...interface{}) *Session + Desc(...string) *Session + Delete(interface{}) (int64, error) + Distinct(columns ...string) *Session + DropIndexes(bean interface{}) error + Exec(sqlOrArgs ...interface{}) (sql.Result, error) + Exist(bean ...interface{}) (bool, error) + Find(interface{}, ...interface{}) error + FindAndCount(interface{}, ...interface{}) (int64, error) + Get(interface{}) (bool, error) + GroupBy(keys string) *Session + ID(interface{}) *Session + In(string, ...interface{}) *Session + Incr(column string, arg ...interface{}) *Session + Insert(...interface{}) (int64, error) + InsertOne(interface{}) (int64, error) + IsTableEmpty(bean interface{}) (bool, error) + IsTableExist(beanOrTableName interface{}) (bool, error) + Iterate(interface{}, IterFunc) error + Limit(int, ...int) *Session + MustCols(columns ...string) *Session + NoAutoCondition(...bool) *Session + NotIn(string, ...interface{}) *Session + Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session + Omit(columns ...string) *Session + OrderBy(order string) *Session + Ping() error + Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) + QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) + QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) + Rows(bean interface{}) (*Rows, error) + SetExpr(string, interface{}) *Session + Select(string) *Session + SQL(interface{}, ...interface{}) *Session + Sum(bean interface{}, colName string) (float64, error) + SumInt(bean interface{}, colName string) (int64, error) + 
Sums(bean interface{}, colNames ...string) ([]float64, error) + SumsInt(bean interface{}, colNames ...string) ([]int64, error) + Table(tableNameOrBean interface{}) *Session + Unscoped() *Session + Update(bean interface{}, condiBeans ...interface{}) (int64, error) + UseBool(...string) *Session + Where(interface{}, ...interface{}) *Session +} + +// EngineInterface defines the interface which Engine, EngineGroup will implementate. +type EngineInterface interface { + Interface + + Before(func(interface{})) *Session + Charset(charset string) *Session + ClearCache(...interface{}) error + Context(context.Context) *Session + CreateTables(...interface{}) error + DBMetas() ([]*schemas.Table, error) + Dialect() dialects.Dialect + DriverName() string + DropTables(...interface{}) error + DumpAllToFile(fp string, tp ...schemas.DBType) error + GetCacher(string) caches.Cacher + GetColumnMapper() names.Mapper + GetDefaultCacher() caches.Cacher + GetTableMapper() names.Mapper + GetTZDatabase() *time.Location + GetTZLocation() *time.Location + ImportFile(fp string) ([]sql.Result, error) + MapCacher(interface{}, caches.Cacher) error + NewSession() *Session + NoAutoTime() *Session + Quote(string) string + SetCacher(string, caches.Cacher) + SetConnMaxLifetime(time.Duration) + SetColumnMapper(names.Mapper) + SetDefaultCacher(caches.Cacher) + SetLogger(logger interface{}) + SetLogLevel(log.LogLevel) + SetMapper(names.Mapper) + SetMaxOpenConns(int) + SetMaxIdleConns(int) + SetQuotePolicy(dialects.QuotePolicy) + SetSchema(string) + SetTableMapper(names.Mapper) + SetTZDatabase(tz *time.Location) + SetTZLocation(tz *time.Location) + AddHook(hook contexts.Hook) + ShowSQL(show ...bool) + Sync(...interface{}) error + Sync2(...interface{}) error + StoreEngine(storeEngine string) *Session + TableInfo(bean interface{}) (*schemas.Table, error) + TableName(interface{}, ...bool) string + UnMapType(reflect.Type) +} + +var ( + _ Interface = &Session{} + _ EngineInterface = &Engine{} + _ EngineInterface = &EngineGroup{} +) diff --git a/vendor/xorm.io/xorm/internal/json/json.go b/vendor/xorm.io/xorm/internal/json/json.go new file mode 100644 index 0000000000..c9a2eb4e24 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/json/json.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import "encoding/json" + +// JSONInterface represents an interface to handle json data +type JSONInterface interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var ( + // DefaultJSONHandler default json handler + DefaultJSONHandler JSONInterface = StdJSON{} +) + +// StdJSON implements JSONInterface via encoding/json +type StdJSON struct{} + +// Marshal implements JSONInterface +func (StdJSON) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal implements JSONInterface +func (StdJSON) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/xorm.io/xorm/internal/statements/cache.go b/vendor/xorm.io/xorm/internal/statements/cache.go new file mode 100644 index 0000000000..cb33df086e --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/cache.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package statements + +import ( + "fmt" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) ConvertIDSQL(sqlStr string) string { + if statement.RefTable != nil { + cols := statement.RefTable.PKColumns() + if len(cols) == 0 { + return "" + } + + colstrs := statement.joinColumns(cols, false) + sqls := utils.SplitNNoCase(sqlStr, " from ", 2) + if len(sqls) != 2 { + return "" + } + + var top string + pLimitN := statement.LimitN + if pLimitN != nil && statement.dialect.URI().DBType == schemas.MSSQL { + top = fmt.Sprintf("TOP %d ", *pLimitN) + } + + newsql := fmt.Sprintf("SELECT %s%s FROM %v", top, colstrs, sqls[1]) + return newsql + } + return "" +} + +func (statement *Statement) ConvertUpdateSQL(sqlStr string) (string, string) { + if statement.RefTable == nil || len(statement.RefTable.PrimaryKeys) != 1 { + return "", "" + } + + colstrs := statement.joinColumns(statement.RefTable.PKColumns(), true) + sqls := utils.SplitNNoCase(sqlStr, "where", 2) + if len(sqls) != 2 { + if len(sqls) == 1 { + return sqls[0], fmt.Sprintf("SELECT %v FROM %v", + colstrs, statement.quote(statement.TableName())) + } + return "", "" + } + + var whereStr = sqls[1] + + // TODO: for postgres only, if any other database? + var paraStr string + if statement.dialect.URI().DBType == schemas.POSTGRES { + paraStr = "$" + } else if statement.dialect.URI().DBType == schemas.MSSQL { + paraStr = ":" + } + + if paraStr != "" { + if strings.Contains(sqls[1], paraStr) { + dollers := strings.Split(sqls[1], paraStr) + whereStr = dollers[0] + for i, c := range dollers[1:] { + ccs := strings.SplitN(c, " ", 2) + whereStr += fmt.Sprintf(paraStr+"%v %v", i+1, ccs[1]) + } + } + } + + return sqls[0], fmt.Sprintf("SELECT %v FROM %v WHERE %v", + colstrs, statement.quote(statement.TableName()), + whereStr) +} diff --git a/vendor/xorm.io/xorm/internal/statements/column_map.go b/vendor/xorm.io/xorm/internal/statements/column_map.go new file mode 100644 index 0000000000..bb764b4e01 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/column_map.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "strings" + + "xorm.io/xorm/schemas" +) + +type columnMap []string + +func (m columnMap) Contain(colName string) bool { + if len(m) == 0 { + return false + } + + n := len(colName) + for _, mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, colName) { + return true + } + } + + return false +} + +func (m columnMap) Len() int { + return len(m) +} + +func (m columnMap) IsEmpty() bool { + return len(m) == 0 +} + +func (m *columnMap) Add(colName string) bool { + if m.Contain(colName) { + return false + } + *m = append(*m, colName) + return true +} + +func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { + if len(m) == 0 { + return false, false + } + + n := len(col.Name) + + for mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, col.Name) { + return m[mk], true + } + } + + return false, false +} diff --git a/vendor/xorm.io/xorm/internal/statements/expr_param.go b/vendor/xorm.io/xorm/internal/statements/expr_param.go new file mode 100644 index 0000000000..6657408e41 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/expr_param.go @@ -0,0 +1,126 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "fmt" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +type ErrUnsupportedExprType struct { + tp string +} + +func (err ErrUnsupportedExprType) Error() string { + return fmt.Sprintf("Unsupported expression type: %v", err.tp) +} + +type exprParam struct { + colName string + arg interface{} +} + +type exprParams struct { + ColNames []string + Args []interface{} +} + +func (exprs *exprParams) Len() int { + return len(exprs.ColNames) +} + +func (exprs *exprParams) addParam(colName string, arg interface{}) { + exprs.ColNames = append(exprs.ColNames, colName) + exprs.Args = append(exprs.Args, arg) +} + +func (exprs *exprParams) IsColExist(colName string) bool { + for _, name := range exprs.ColNames { + if strings.EqualFold(schemas.CommonQuoter.Trim(name), schemas.CommonQuoter.Trim(colName)) { + return true + } + } + return false +} + +func (exprs *exprParams) getByName(colName string) (exprParam, bool) { + for i, name := range exprs.ColNames { + if strings.EqualFold(name, colName) { + return exprParam{name, exprs.Args[i]}, true + } + } + return exprParam{}, false +} + +func (exprs *exprParams) WriteArgs(w *builder.BytesWriter) error { + for i, expr := range exprs.Args { + switch arg := expr.(type) { + case *builder.Builder: + if _, err := w.WriteString("("); err != nil { + return err + } + if err := arg.WriteTo(w); err != nil { + return err + } + if _, err := w.WriteString(")"); err != nil { + return err + } + case string: + if arg == "" { + arg = "''" + } + if _, err := w.WriteString(fmt.Sprintf("%v", arg)); err != nil { + return err + } + default: + if _, err := w.WriteString("?"); err != nil { + return err + } + w.Append(arg) + } + if i != len(exprs.Args)-1 { + if _, err := w.WriteString(","); err != nil { + return err + } + } + } + return nil +} + +func (exprs *exprParams) writeNameArgs(w *builder.BytesWriter) error { + for i, colName := range exprs.ColNames { + if _, err := w.WriteString(colName); err != nil { + return err + } + if _, err := w.WriteString("="); err != nil { + return err + } + + switch arg := exprs.Args[i].(type) { + case *builder.Builder: + if _, err := w.WriteString("("); err != nil { + return err + } + if err := arg.WriteTo(w); err != nil { + return err + } + if _, err := w.WriteString("("); err != nil { + return err + } + default: + w.Append(exprs.Args[i]) + } + + if i+1 != len(exprs.ColNames) { + if _, err := w.WriteString(","); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/insert.go b/vendor/xorm.io/xorm/internal/statements/insert.go new file mode 100644 index 0000000000..6cbbbedaad --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/insert.go @@ -0,0 +1,207 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package statements + +import ( + "fmt" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) writeInsertOutput(buf *strings.Builder, table *schemas.Table) error { + if statement.dialect.URI().DBType == schemas.MSSQL && len(table.AutoIncrement) > 0 { + if _, err := buf.WriteString(" OUTPUT Inserted."); err != nil { + return err + } + if _, err := buf.WriteString(table.AutoIncrement); err != nil { + return err + } + } + return nil +} + +// GenInsertSQL generates insert beans SQL +func (statement *Statement) GenInsertSQL(colNames []string, args []interface{}) (string, []interface{}, error) { + var ( + buf = builder.NewWriter() + exprs = statement.ExprColumns + table = statement.RefTable + tableName = statement.TableName() + ) + + if _, err := buf.WriteString("INSERT INTO "); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { + return "", nil, err + } + + if len(colNames) <= 0 { + if statement.dialect.URI().DBType == schemas.MYSQL { + if _, err := buf.WriteString(" VALUES ()"); err != nil { + return "", nil, err + } + } else { + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + if _, err := buf.WriteString(" DEFAULT VALUES"); err != nil { + return "", nil, err + } + } + } else { + if _, err := buf.WriteString(" ("); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(colNames, exprs.ColNames...), ","); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + if err := statement.writeInsertOutput(buf.Builder, table); err != nil { + return "", nil, err + } + + if statement.Conds().IsValid() { + if _, err := buf.WriteString(" SELECT "); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(" FROM "); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, tableName); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(" WHERE "); err != nil { + return "", nil, err + } + + if err := statement.Conds().WriteTo(buf); err != nil { + return "", nil, err + } + } else { + if _, err := buf.WriteString(" VALUES ("); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + } + + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + } + } + + if len(table.AutoIncrement) > 0 && statement.dialect.URI().DBType == schemas.POSTGRES { + if _, err := buf.WriteString(" RETURNING "); err != nil { + return "", nil, err + } + if err := statement.dialect.Quoter().QuoteTo(buf.Builder, table.AutoIncrement); err != nil { + return "", nil, err + } + } + + return buf.String(), buf.Args(), nil +} + +// GenInsertMapSQL generates insert map SQL +func (statement *Statement) GenInsertMapSQL(columns []string, args []interface{}) (string, []interface{}, error) { + var ( + buf = builder.NewWriter() + exprs = statement.ExprColumns + tableName 
= statement.TableName() + ) + + if _, err := buf.WriteString(fmt.Sprintf("INSERT INTO %s (", statement.quote(tableName))); err != nil { + return "", nil, err + } + + if err := statement.dialect.Quoter().JoinWrite(buf.Builder, append(columns, exprs.ColNames...), ","); err != nil { + return "", nil, err + } + + // if insert where + if statement.Conds().IsValid() { + if _, err := buf.WriteString(") SELECT "); err != nil { + return "", nil, err + } + + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + } + + if _, err := buf.WriteString(fmt.Sprintf(" FROM %s WHERE ", statement.quote(tableName))); err != nil { + return "", nil, err + } + + if err := statement.Conds().WriteTo(buf); err != nil { + return "", nil, err + } + } else { + if _, err := buf.WriteString(") VALUES ("); err != nil { + return "", nil, err + } + if err := statement.WriteArgs(buf, args); err != nil { + return "", nil, err + } + + if len(exprs.Args) > 0 { + if _, err := buf.WriteString(","); err != nil { + return "", nil, err + } + if err := exprs.WriteArgs(buf); err != nil { + return "", nil, err + } + } + if _, err := buf.WriteString(")"); err != nil { + return "", nil, err + } + } + + return buf.String(), buf.Args(), nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/pk.go b/vendor/xorm.io/xorm/internal/statements/pk.go new file mode 100644 index 0000000000..59da89c0b4 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/pk.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "fmt" + "reflect" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +var ( + ptrPkType = reflect.TypeOf(&schemas.PK{}) + pkType = reflect.TypeOf(schemas.PK{}) + stringType = reflect.TypeOf("") + intType = reflect.TypeOf(int64(0)) + uintType = reflect.TypeOf(uint64(0)) +) + +// ErrIDConditionWithNoTable represents an error there is no reference table with an ID condition +type ErrIDConditionWithNoTable struct { + ID schemas.PK +} + +func (err ErrIDConditionWithNoTable) Error() string { + return fmt.Sprintf("ID condition %#v need reference table", err.ID) +} + +// IsIDConditionWithNoTableErr return true if the err is ErrIDConditionWithNoTable +func IsIDConditionWithNoTableErr(err error) bool { + _, ok := err.(ErrIDConditionWithNoTable) + return ok +} + +// ID generate "where id = ? " statement or for composite key "where key1 = ? and key2 = ?" 
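+// For example (a hypothetical usage sketch; the values are illustrative):
+//
+//	statement.ID(1)                    // WHERE id = 1
+//	statement.ID(schemas.PK{1, "dev"}) // WHERE key1 = 1 AND key2 = 'dev'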
+func (statement *Statement) ID(id interface{}) *Statement { + switch t := id.(type) { + case *schemas.PK: + statement.idParam = *t + case schemas.PK: + statement.idParam = t + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + statement.idParam = schemas.PK{id} + default: + idValue := reflect.ValueOf(id) + idType := idValue.Type() + + switch idType.Kind() { + case reflect.String: + statement.idParam = schemas.PK{idValue.Convert(stringType).Interface()} + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + statement.idParam = schemas.PK{idValue.Convert(intType).Interface()} + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + statement.idParam = schemas.PK{idValue.Convert(uintType).Interface()} + case reflect.Slice: + if idType.ConvertibleTo(pkType) { + statement.idParam = idValue.Convert(pkType).Interface().(schemas.PK) + } + case reflect.Ptr: + if idType.ConvertibleTo(ptrPkType) { + statement.idParam = idValue.Convert(ptrPkType).Elem().Interface().(schemas.PK) + } + } + } + + if statement.idParam == nil { + statement.LastError = fmt.Errorf("ID param %#v is not supported", id) + } + + return statement +} + +// ProcessIDParam handles the process of id condition +func (statement *Statement) ProcessIDParam() error { + if statement.idParam == nil { + return nil + } + + if statement.RefTable == nil { + return ErrIDConditionWithNoTable{statement.idParam} + } + + if len(statement.RefTable.PrimaryKeys) != len(statement.idParam) { + return fmt.Errorf("ID condition is error, expect %d primarykeys, there are %d", + len(statement.RefTable.PrimaryKeys), + len(statement.idParam), + ) + } + + for i, col := range statement.RefTable.PKColumns() { + var colName = statement.colName(col, statement.TableName()) + statement.cond = statement.cond.And(builder.Eq{colName: statement.idParam[i]}) + } + return nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/query.go b/vendor/xorm.io/xorm/internal/statements/query.go new file mode 100644 index 0000000000..ab3021bf6d --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/query.go @@ -0,0 +1,441 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package statements + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func (statement *Statement) GenQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) { + if len(sqlOrArgs) > 0 { + return statement.ConvertSQLOrArgs(sqlOrArgs...) 
+ } + + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + if statement.JoinStr == "" { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = "*" + } + } + } + if columnStr == "" { + columnStr = "*" + } + } + + if err := statement.ProcessIDParam(); err != nil { + return "", nil, err + } + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + args := append(statement.joinArgs, condArgs...) + + // for mssql and use limit + qs := strings.Count(sqlStr, "?") + if len(args)*2 == qs { + args = append(args, args...) + } + + return sqlStr, args, nil +} + +func (statement *Statement) GenSumSQL(bean interface{}, columns ...string) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + statement.SetRefBean(bean) + + var sumStrs = make([]string, 0, len(columns)) + for _, colName := range columns { + if !strings.Contains(colName, " ") && !strings.Contains(colName, "(") { + colName = statement.quote(colName) + } else { + colName = statement.ReplaceQuote(colName) + } + sumStrs = append(sumStrs, fmt.Sprintf("COALESCE(sum(%s),0)", colName)) + } + sumSelect := strings.Join(sumStrs, ", ") + + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + + sqlStr, condArgs, err := statement.genSelectSQL(sumSelect, true, true) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +func (statement *Statement) GenGetSQL(bean interface{}) (string, []interface{}, error) { + v := rValue(bean) + isStruct := v.Kind() == reflect.Struct + if isStruct { + statement.SetRefBean(bean) + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + // TODO: always generate column names, not use * even if join + if len(statement.JoinStr) == 0 { + if len(columnStr) == 0 { + if len(statement.GroupByStr) > 0 { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if len(columnStr) == 0 { + if len(statement.GroupByStr) > 0 { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } + } + } + } + + if len(columnStr) == 0 { + columnStr = "*" + } + + if isStruct { + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + } else { + if err := statement.ProcessIDParam(); err != nil { + return "", nil, err + } + } + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +// GenCountSQL generates the SQL for counting +func (statement *Statement) GenCountSQL(beans ...interface{}) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var condArgs []interface{} + var err error + if len(beans) > 0 { + statement.SetRefBean(beans[0]) + if err := 
statement.mergeConds(beans[0]); err != nil { + return "", nil, err + } + } + + var selectSQL = statement.SelectStr + if len(selectSQL) <= 0 { + if statement.IsDistinct { + selectSQL = fmt.Sprintf("count(DISTINCT %s)", statement.ColumnStr()) + } else if statement.ColumnStr() != "" { + selectSQL = fmt.Sprintf("count(%s)", statement.ColumnStr()) + } else { + selectSQL = "count(*)" + } + } + sqlStr, condArgs, err := statement.genSelectSQL(selectSQL, false, false) + if err != nil { + return "", nil, err + } + + return sqlStr, append(statement.joinArgs, condArgs...), nil +} + +func (statement *Statement) genSelectSQL(columnStr string, needLimit, needOrderBy bool) (string, []interface{}, error) { + var ( + distinct string + dialect = statement.dialect + quote = statement.quote + fromStr = " FROM " + top, mssqlCondi, whereStr string + ) + if statement.IsDistinct && !strings.HasPrefix(columnStr, "count") { + distinct = "DISTINCT " + } + + condSQL, condArgs, err := statement.GenCondSQL(statement.cond) + if err != nil { + return "", nil, err + } + if len(condSQL) > 0 { + whereStr = " WHERE " + condSQL + } + + if dialect.URI().DBType == schemas.MSSQL && strings.Contains(statement.TableName(), "..") { + fromStr += statement.TableName() + } else { + fromStr += quote(statement.TableName()) + } + + if statement.TableAlias != "" { + if dialect.URI().DBType == schemas.ORACLE { + fromStr += " " + quote(statement.TableAlias) + } else { + fromStr += " AS " + quote(statement.TableAlias) + } + } + if statement.JoinStr != "" { + fromStr = fmt.Sprintf("%v %v", fromStr, statement.JoinStr) + } + + pLimitN := statement.LimitN + if dialect.URI().DBType == schemas.MSSQL { + if pLimitN != nil { + LimitNValue := *pLimitN + top = fmt.Sprintf("TOP %d ", LimitNValue) + } + if statement.Start > 0 { + var column string + if len(statement.RefTable.PKColumns()) == 0 { + for _, index := range statement.RefTable.Indexes { + if len(index.Cols) == 1 { + column = index.Cols[0] + break + } + } + if len(column) == 0 { + column = statement.RefTable.ColumnsSeq()[0] + } + } else { + column = statement.RefTable.PKColumns()[0].Name + } + if statement.needTableName() { + if len(statement.TableAlias) > 0 { + column = statement.TableAlias + "." + column + } else { + column = statement.TableName() + "." 
+ column + } + } + + var orderStr string + if needOrderBy && len(statement.OrderStr) > 0 { + orderStr = " ORDER BY " + statement.OrderStr + } + + var groupStr string + if len(statement.GroupByStr) > 0 { + groupStr = " GROUP BY " + statement.GroupByStr + } + mssqlCondi = fmt.Sprintf("(%s NOT IN (SELECT TOP %d %s%s%s%s%s))", + column, statement.Start, column, fromStr, whereStr, orderStr, groupStr) + } + } + + var buf strings.Builder + fmt.Fprintf(&buf, "SELECT %v%v%v%v%v", distinct, top, columnStr, fromStr, whereStr) + if len(mssqlCondi) > 0 { + if len(whereStr) > 0 { + fmt.Fprint(&buf, " AND ", mssqlCondi) + } else { + fmt.Fprint(&buf, " WHERE ", mssqlCondi) + } + } + + if statement.GroupByStr != "" { + fmt.Fprint(&buf, " GROUP BY ", statement.GroupByStr) + } + if statement.HavingStr != "" { + fmt.Fprint(&buf, " ", statement.HavingStr) + } + if needOrderBy && statement.OrderStr != "" { + fmt.Fprint(&buf, " ORDER BY ", statement.OrderStr) + } + if needLimit { + if dialect.URI().DBType != schemas.MSSQL && dialect.URI().DBType != schemas.ORACLE { + if statement.Start > 0 { + if pLimitN != nil { + fmt.Fprintf(&buf, " LIMIT %v OFFSET %v", *pLimitN, statement.Start) + } else { + fmt.Fprintf(&buf, "LIMIT 0 OFFSET %v", statement.Start) + } + } else if pLimitN != nil { + fmt.Fprint(&buf, " LIMIT ", *pLimitN) + } + } else if dialect.URI().DBType == schemas.ORACLE { + if statement.Start != 0 || pLimitN != nil { + oldString := buf.String() + buf.Reset() + rawColStr := columnStr + if rawColStr == "*" { + rawColStr = "at.*" + } + fmt.Fprintf(&buf, "SELECT %v FROM (SELECT %v,ROWNUM RN FROM (%v) at WHERE ROWNUM <= %d) aat WHERE RN > %d", + columnStr, rawColStr, oldString, statement.Start+*pLimitN, statement.Start) + } + } + } + if statement.IsForUpdate { + return dialect.ForUpdateSQL(buf.String()), condArgs, nil + } + + return buf.String(), condArgs, nil +} + +func (statement *Statement) GenExistSQL(bean ...interface{}) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var sqlStr string + var args []interface{} + var joinStr string + var err error + if len(bean) == 0 { + tableName := statement.TableName() + if len(tableName) <= 0 { + return "", nil, ErrTableNotFound + } + + tableName = statement.quote(tableName) + if len(statement.JoinStr) > 0 { + joinStr = statement.JoinStr + } + + if statement.Conds().IsValid() { + condSQL, condArgs, err := statement.GenCondSQL(statement.Conds()) + if err != nil { + return "", nil, err + } + + if statement.dialect.URI().DBType == schemas.MSSQL { + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s %s WHERE %s", tableName, joinStr, condSQL) + } else if statement.dialect.URI().DBType == schemas.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE (%s) %s AND ROWNUM=1", tableName, joinStr, condSQL) + } else { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s WHERE %s LIMIT 1", tableName, joinStr, condSQL) + } + args = condArgs + } else { + if statement.dialect.URI().DBType == schemas.MSSQL { + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s %s", tableName, joinStr) + } else if statement.dialect.URI().DBType == schemas.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s WHERE ROWNUM=1", tableName, joinStr) + } else { + sqlStr = fmt.Sprintf("SELECT * FROM %s %s LIMIT 1", tableName, joinStr) + } + args = []interface{}{} + } + } else { + beanValue := reflect.ValueOf(bean[0]) + if beanValue.Kind() != reflect.Ptr { + return "", nil, errors.New("needs a pointer") + } + + if beanValue.Elem().Kind() == reflect.Struct 
{ + if err := statement.SetRefBean(bean[0]); err != nil { + return "", nil, err + } + } + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + statement.Limit(1) + sqlStr, args, err = statement.GenGetSQL(bean[0]) + if err != nil { + return "", nil, err + } + } + + return sqlStr, args, nil +} + +func (statement *Statement) GenFindSQL(autoCond builder.Cond) (string, []interface{}, error) { + if statement.RawSQL != "" { + return statement.GenRawSQL(), statement.RawParams, nil + } + + var sqlStr string + var args []interface{} + var err error + + if len(statement.TableName()) <= 0 { + return "", nil, ErrTableNotFound + } + + var columnStr = statement.ColumnStr() + if len(statement.SelectStr) > 0 { + columnStr = statement.SelectStr + } else { + if statement.JoinStr == "" { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = statement.genColumnStr() + } + } + } else { + if columnStr == "" { + if statement.GroupByStr != "" { + columnStr = statement.quoteColumnStr(statement.GroupByStr) + } else { + columnStr = "*" + } + } + } + if columnStr == "" { + columnStr = "*" + } + } + + statement.cond = statement.cond.And(autoCond) + + sqlStr, condArgs, err := statement.genSelectSQL(columnStr, true, true) + if err != nil { + return "", nil, err + } + args = append(statement.joinArgs, condArgs...) + // for mssql and use limit + qs := strings.Count(sqlStr, "?") + if len(args)*2 == qs { + args = append(args, args...) + } + + return sqlStr, args, nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/statement.go b/vendor/xorm.io/xorm/internal/statements/statement.go new file mode 100644 index 0000000000..ed7bdaeb62 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/statement.go @@ -0,0 +1,995 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package statements + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "xorm.io/builder" + "xorm.io/xorm/contexts" + "xorm.io/xorm/convert" + "xorm.io/xorm/dialects" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" + "xorm.io/xorm/tags" +) + +var ( + // ErrConditionType condition type unsupported + ErrConditionType = errors.New("Unsupported condition type") + // ErrUnSupportedSQLType parameter of SQL is not supported + ErrUnSupportedSQLType = errors.New("Unsupported sql type") + // ErrUnSupportedType unsupported error + ErrUnSupportedType = errors.New("Unsupported type error") + // ErrTableNotFound table not found error + ErrTableNotFound = errors.New("Table not found") +) + +// Statement save all the sql info for executing SQL +type Statement struct { + RefTable *schemas.Table + dialect dialects.Dialect + defaultTimeZone *time.Location + tagParser *tags.Parser + Start int + LimitN *int + idParam schemas.PK + OrderStr string + JoinStr string + joinArgs []interface{} + GroupByStr string + HavingStr string + SelectStr string + useAllCols bool + AltTableName string + tableName string + RawSQL string + RawParams []interface{} + UseCascade bool + UseAutoJoin bool + StoreEngine string + Charset string + UseCache bool + UseAutoTime bool + NoAutoCondition bool + IsDistinct bool + IsForUpdate bool + TableAlias string + allUseBool bool + CheckVersion bool + unscoped bool + ColumnMap columnMap + OmitColumnMap columnMap + MustColumnMap map[string]bool + NullableMap map[string]bool + IncrColumns exprParams + DecrColumns exprParams + ExprColumns exprParams + cond builder.Cond + BufferSize int + Context contexts.ContextCache + LastError error +} + +// NewStatement creates a new statement +func NewStatement(dialect dialects.Dialect, tagParser *tags.Parser, defaultTimeZone *time.Location) *Statement { + statement := &Statement{ + dialect: dialect, + tagParser: tagParser, + defaultTimeZone: defaultTimeZone, + } + statement.Reset() + return statement +} + +func (statement *Statement) SetTableName(tableName string) { + statement.tableName = tableName +} + +func (statement *Statement) omitStr() string { + return statement.dialect.Quoter().Join(statement.OmitColumnMap, " ,") +} + +// GenRawSQL generates correct raw sql +func (statement *Statement) GenRawSQL() string { + return statement.ReplaceQuote(statement.RawSQL) +} + +func (statement *Statement) GenCondSQL(condOrBuilder interface{}) (string, []interface{}, error) { + condSQL, condArgs, err := builder.ToSQL(condOrBuilder) + if err != nil { + return "", nil, err + } + return statement.ReplaceQuote(condSQL), condArgs, nil +} + +func (statement *Statement) ReplaceQuote(sql string) string { + if sql == "" || statement.dialect.URI().DBType == schemas.MYSQL || + statement.dialect.URI().DBType == schemas.SQLITE { + return sql + } + return statement.dialect.Quoter().Replace(sql) +} + +func (statement *Statement) SetContextCache(ctxCache contexts.ContextCache) { + statement.Context = ctxCache +} + +// Init reset all the statement's fields +func (statement *Statement) Reset() { + statement.RefTable = nil + statement.Start = 0 + statement.LimitN = nil + statement.OrderStr = "" + statement.UseCascade = true + statement.JoinStr = "" + statement.joinArgs = make([]interface{}, 0) + statement.GroupByStr = "" + statement.HavingStr = "" + statement.ColumnMap = columnMap{} + statement.OmitColumnMap = columnMap{} + statement.AltTableName = "" + statement.tableName = "" + statement.idParam = nil + 
statement.RawSQL = "" + statement.RawParams = make([]interface{}, 0) + statement.UseCache = true + statement.UseAutoTime = true + statement.NoAutoCondition = false + statement.IsDistinct = false + statement.IsForUpdate = false + statement.TableAlias = "" + statement.SelectStr = "" + statement.allUseBool = false + statement.useAllCols = false + statement.MustColumnMap = make(map[string]bool) + statement.NullableMap = make(map[string]bool) + statement.CheckVersion = true + statement.unscoped = false + statement.IncrColumns = exprParams{} + statement.DecrColumns = exprParams{} + statement.ExprColumns = exprParams{} + statement.cond = builder.NewCond() + statement.BufferSize = 0 + statement.Context = nil + statement.LastError = nil +} + +// NoAutoCondition if you do not want convert bean's field as query condition, then use this function +func (statement *Statement) SetNoAutoCondition(no ...bool) *Statement { + statement.NoAutoCondition = true + if len(no) > 0 { + statement.NoAutoCondition = no[0] + } + return statement +} + +// Alias set the table alias +func (statement *Statement) Alias(alias string) *Statement { + statement.TableAlias = alias + return statement +} + +// SQL adds raw sql statement +func (statement *Statement) SQL(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case (*builder.Builder): + var err error + statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL() + if err != nil { + statement.LastError = err + } + case string: + statement.RawSQL = query.(string) + statement.RawParams = args + default: + statement.LastError = ErrUnSupportedSQLType + } + + return statement +} + +// Where add Where statement +func (statement *Statement) Where(query interface{}, args ...interface{}) *Statement { + return statement.And(query, args...) +} + +func (statement *Statement) quote(s string) string { + return statement.dialect.Quoter().Quote(s) +} + +// And add Where & and statement +func (statement *Statement) And(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case string: + cond := builder.Expr(query.(string), args...) + statement.cond = statement.cond.And(cond) + case map[string]interface{}: + queryMap := query.(map[string]interface{}) + newMap := make(map[string]interface{}) + for k, v := range queryMap { + newMap[statement.quote(k)] = v + } + statement.cond = statement.cond.And(builder.Eq(newMap)) + case builder.Cond: + cond := query.(builder.Cond) + statement.cond = statement.cond.And(cond) + for _, v := range args { + if vv, ok := v.(builder.Cond); ok { + statement.cond = statement.cond.And(vv) + } + } + default: + statement.LastError = ErrConditionType + } + + return statement +} + +// Or add Where & Or statement +func (statement *Statement) Or(query interface{}, args ...interface{}) *Statement { + switch query.(type) { + case string: + cond := builder.Expr(query.(string), args...) + statement.cond = statement.cond.Or(cond) + case map[string]interface{}: + cond := builder.Eq(query.(map[string]interface{})) + statement.cond = statement.cond.Or(cond) + case builder.Cond: + cond := query.(builder.Cond) + statement.cond = statement.cond.Or(cond) + for _, v := range args { + if vv, ok := v.(builder.Cond); ok { + statement.cond = statement.cond.Or(vv) + } + } + default: + // TODO: not support condition type + } + return statement +} + +// In generate "Where column IN (?) 
" statement +func (statement *Statement) In(column string, args ...interface{}) *Statement { + in := builder.In(statement.quote(column), args...) + statement.cond = statement.cond.And(in) + return statement +} + +// NotIn generate "Where column NOT IN (?) " statement +func (statement *Statement) NotIn(column string, args ...interface{}) *Statement { + notIn := builder.NotIn(statement.quote(column), args...) + statement.cond = statement.cond.And(notIn) + return statement +} + +func (statement *Statement) SetRefValue(v reflect.Value) error { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(reflect.Indirect(v)) + if err != nil { + return err + } + statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), v, true) + return nil +} + +func rValue(bean interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(bean)) +} + +func (statement *Statement) SetRefBean(bean interface{}) error { + var err error + statement.RefTable, err = statement.tagParser.ParseWithCache(rValue(bean)) + if err != nil { + return err + } + statement.tableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), bean, true) + return nil +} + +func (statement *Statement) needTableName() bool { + return len(statement.JoinStr) > 0 +} + +func (statement *Statement) colName(col *schemas.Column, tableName string) string { + if statement.needTableName() { + var nm = tableName + if len(statement.TableAlias) > 0 { + nm = statement.TableAlias + } + return statement.quote(nm) + "." + statement.quote(col.Name) + } + return statement.quote(col.Name) +} + +// TableName return current tableName +func (statement *Statement) TableName() string { + if statement.AltTableName != "" { + return statement.AltTableName + } + + return statement.tableName +} + +// Incr Generate "Update ... Set column = column + arg" statement +func (statement *Statement) Incr(column string, arg ...interface{}) *Statement { + if len(arg) > 0 { + statement.IncrColumns.addParam(column, arg[0]) + } else { + statement.IncrColumns.addParam(column, 1) + } + return statement +} + +// Decr Generate "Update ... Set column = column - arg" statement +func (statement *Statement) Decr(column string, arg ...interface{}) *Statement { + if len(arg) > 0 { + statement.DecrColumns.addParam(column, arg[0]) + } else { + statement.DecrColumns.addParam(column, 1) + } + return statement +} + +// SetExpr Generate "Update ... Set column = {expression}" statement +func (statement *Statement) SetExpr(column string, expression interface{}) *Statement { + if e, ok := expression.(string); ok { + statement.ExprColumns.addParam(column, statement.dialect.Quoter().Replace(e)) + } else { + statement.ExprColumns.addParam(column, expression) + } + return statement +} + +// Distinct generates "DISTINCT col1, col2 " statement +func (statement *Statement) Distinct(columns ...string) *Statement { + statement.IsDistinct = true + statement.Cols(columns...) + return statement +} + +// ForUpdate generates "SELECT ... 
FOR UPDATE" statement +func (statement *Statement) ForUpdate() *Statement { + statement.IsForUpdate = true + return statement +} + +// Select replace select +func (statement *Statement) Select(str string) *Statement { + statement.SelectStr = statement.ReplaceQuote(str) + return statement +} + +func col2NewCols(columns ...string) []string { + newColumns := make([]string, 0, len(columns)) + for _, col := range columns { + col = strings.Replace(col, "`", "", -1) + col = strings.Replace(col, `"`, "", -1) + ccols := strings.Split(col, ",") + for _, c := range ccols { + newColumns = append(newColumns, strings.TrimSpace(c)) + } + } + return newColumns +} + +// Cols generate "col1, col2" statement +func (statement *Statement) Cols(columns ...string) *Statement { + cols := col2NewCols(columns...) + for _, nc := range cols { + statement.ColumnMap.Add(nc) + } + return statement +} + +func (statement *Statement) ColumnStr() string { + return statement.dialect.Quoter().Join(statement.ColumnMap, ", ") +} + +// AllCols update use only: update all columns +func (statement *Statement) AllCols() *Statement { + statement.useAllCols = true + return statement +} + +// MustCols update use only: must update columns +func (statement *Statement) MustCols(columns ...string) *Statement { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.MustColumnMap[strings.ToLower(nc)] = true + } + return statement +} + +// UseBool indicates that use bool fields as update contents and query contiditions +func (statement *Statement) UseBool(columns ...string) *Statement { + if len(columns) > 0 { + statement.MustCols(columns...) + } else { + statement.allUseBool = true + } + return statement +} + +// Omit do not use the columns +func (statement *Statement) Omit(columns ...string) { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.OmitColumnMap = append(statement.OmitColumnMap, nc) + } +} + +// Nullable Update use only: update columns to null when value is nullable and zero-value +func (statement *Statement) Nullable(columns ...string) { + newColumns := col2NewCols(columns...) + for _, nc := range newColumns { + statement.NullableMap[strings.ToLower(nc)] = true + } +} + +// Top generate LIMIT limit statement +func (statement *Statement) Top(limit int) *Statement { + statement.Limit(limit) + return statement +} + +// Limit generate LIMIT start, limit statement +func (statement *Statement) Limit(limit int, start ...int) *Statement { + statement.LimitN = &limit + if len(start) > 0 { + statement.Start = start[0] + } + return statement +} + +// OrderBy generate "Order By order" statement +func (statement *Statement) OrderBy(order string) *Statement { + if len(statement.OrderStr) > 0 { + statement.OrderStr += ", " + } + statement.OrderStr += statement.ReplaceQuote(order) + return statement +} + +// Desc generate `ORDER BY xx DESC` +func (statement *Statement) Desc(colNames ...string) *Statement { + var buf strings.Builder + if len(statement.OrderStr) > 0 { + fmt.Fprint(&buf, statement.OrderStr, ", ") + } + for i, col := range colNames { + if i > 0 { + fmt.Fprint(&buf, ", ") + } + statement.dialect.Quoter().QuoteTo(&buf, col) + fmt.Fprint(&buf, " DESC") + } + statement.OrderStr = buf.String() + return statement +} + +// Asc provide asc order by query condition, the input parameters are columns. 
+
+func (statement *Statement) Asc(colNames ...string) *Statement {
+	var buf strings.Builder
+	if len(statement.OrderStr) > 0 {
+		fmt.Fprint(&buf, statement.OrderStr, ", ")
+	}
+	for i, col := range colNames {
+		if i > 0 {
+			fmt.Fprint(&buf, ", ")
+		}
+		statement.dialect.Quoter().QuoteTo(&buf, col)
+		fmt.Fprint(&buf, " ASC")
+	}
+	statement.OrderStr = buf.String()
+	return statement
+}
+
+func (statement *Statement) Conds() builder.Cond {
+	return statement.cond
+}
+
+// SetTable temporarily sets the table name; the parameter can be a string or a pointer to a struct
+func (statement *Statement) SetTable(tableNameOrBean interface{}) error {
+	v := rValue(tableNameOrBean)
+	t := v.Type()
+	if t.Kind() == reflect.Struct {
+		var err error
+		statement.RefTable, err = statement.tagParser.ParseWithCache(v)
+		if err != nil {
+			return err
+		}
+	}
+
+	statement.AltTableName = dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tableNameOrBean, true)
+	return nil
+}
+
+// Join adds a JOIN clause. The joinOP should be one of INNER, LEFT OUTER, CROSS, etc.; it will be prepended to JOIN
+func (statement *Statement) Join(joinOP string, tablename interface{}, condition string, args ...interface{}) *Statement {
+	var buf strings.Builder
+	if len(statement.JoinStr) > 0 {
+		fmt.Fprintf(&buf, "%v %v JOIN ", statement.JoinStr, joinOP)
+	} else {
+		fmt.Fprintf(&buf, "%v JOIN ", joinOP)
+	}
+
+	switch tp := tablename.(type) {
+	case builder.Builder:
+		subSQL, subQueryArgs, err := tp.ToSQL()
+		if err != nil {
+			statement.LastError = err
+			return statement
+		}
+
+		fields := strings.Split(tp.TableName(), ".")
+		aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1])
+		aliasName = schemas.CommonQuoter.Trim(aliasName)
+
+		fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), aliasName, statement.ReplaceQuote(condition))
+		statement.joinArgs = append(statement.joinArgs, subQueryArgs...)
+	case *builder.Builder:
+		subSQL, subQueryArgs, err := tp.ToSQL()
+		if err != nil {
+			statement.LastError = err
+			return statement
+		}
+
+		fields := strings.Split(tp.TableName(), ".")
+		aliasName := statement.dialect.Quoter().Trim(fields[len(fields)-1])
+		aliasName = schemas.CommonQuoter.Trim(aliasName)
+
+		fmt.Fprintf(&buf, "(%s) %s ON %v", statement.ReplaceQuote(subSQL), aliasName, statement.ReplaceQuote(condition))
+		statement.joinArgs = append(statement.joinArgs, subQueryArgs...)
+	default:
+		tbName := dialects.FullTableName(statement.dialect, statement.tagParser.GetTableMapper(), tablename, true)
+		if !utils.IsSubQuery(tbName) {
+			var buf strings.Builder
+			statement.dialect.Quoter().QuoteTo(&buf, tbName)
+			tbName = buf.String()
+		}
+		fmt.Fprintf(&buf, "%s ON %v", tbName, statement.ReplaceQuote(condition))
+	}
+
+	statement.JoinStr = buf.String()
+	statement.joinArgs = append(statement.joinArgs, args...)
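+	// Plain args follow any sub-query args appended above, so placeholder
+	// order matches the generated JOIN clause.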
+ return statement +} + +// tbName get some table's table name +func (statement *Statement) tbNameNoSchema(table *schemas.Table) string { + if len(statement.AltTableName) > 0 { + return statement.AltTableName + } + + return table.Name +} + +// GroupBy generate "Group By keys" statement +func (statement *Statement) GroupBy(keys string) *Statement { + statement.GroupByStr = statement.ReplaceQuote(keys) + return statement +} + +// Having generate "Having conditions" statement +func (statement *Statement) Having(conditions string) *Statement { + statement.HavingStr = fmt.Sprintf("HAVING %v", statement.ReplaceQuote(conditions)) + return statement +} + +// Unscoped always disable struct tag "deleted" +func (statement *Statement) SetUnscoped() *Statement { + statement.unscoped = true + return statement +} + +func (statement *Statement) GetUnscoped() bool { + return statement.unscoped +} + +func (statement *Statement) genColumnStr() string { + if statement.RefTable == nil { + return "" + } + + var buf strings.Builder + columns := statement.RefTable.Columns() + + for _, col := range columns { + if statement.OmitColumnMap.Contain(col.Name) { + continue + } + + if len(statement.ColumnMap) > 0 && !statement.ColumnMap.Contain(col.Name) { + continue + } + + if col.MapType == schemas.ONLYTODB { + continue + } + + if buf.Len() != 0 { + buf.WriteString(", ") + } + + if statement.JoinStr != "" { + if statement.TableAlias != "" { + buf.WriteString(statement.TableAlias) + } else { + buf.WriteString(statement.TableName()) + } + + buf.WriteString(".") + } + + statement.dialect.Quoter().QuoteTo(&buf, col.Name) + } + + return buf.String() +} + +func (statement *Statement) GenCreateTableSQL() []string { + statement.RefTable.StoreEngine = statement.StoreEngine + statement.RefTable.Charset = statement.Charset + s, _ := statement.dialect.CreateTableSQL(statement.RefTable, statement.TableName()) + return s +} + +func (statement *Statement) GenIndexSQL() []string { + var sqls []string + tbName := statement.TableName() + for _, index := range statement.RefTable.Indexes { + if index.Type == schemas.IndexType { + sql := statement.dialect.CreateIndexSQL(tbName, index) + sqls = append(sqls, sql) + } + } + return sqls +} + +func uniqueName(tableName, uqeName string) string { + return fmt.Sprintf("UQE_%v_%v", tableName, uqeName) +} + +func (statement *Statement) GenUniqueSQL() []string { + var sqls []string + tbName := statement.TableName() + for _, index := range statement.RefTable.Indexes { + if index.Type == schemas.UniqueType { + sql := statement.dialect.CreateIndexSQL(tbName, index) + sqls = append(sqls, sql) + } + } + return sqls +} + +func (statement *Statement) GenDelIndexSQL() []string { + var sqls []string + tbName := statement.TableName() + idx := strings.Index(tbName, ".") + if idx > -1 { + tbName = tbName[idx+1:] + } + for _, index := range statement.RefTable.Indexes { + sqls = append(sqls, statement.dialect.DropIndexSQL(tbName, index)) + } + return sqls +} + +func (statement *Statement) buildConds2(table *schemas.Table, bean interface{}, + includeVersion bool, includeUpdated bool, includeNil bool, + includeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool, + mustColumnMap map[string]bool, tableName, aliasName string, addedTableName bool) (builder.Cond, error) { + var conds []builder.Cond + for _, col := range table.Columns() { + if !includeVersion && col.IsVersion { + continue + } + if !includeUpdated && col.IsUpdated { + continue + } + if !includeAutoIncr && col.IsAutoIncrement { + continue + } + + 
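+		// Column types that cannot appear in a simple equality condition are
+		// skipped next: dialect-specific types first, then JSON columns.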
if statement.dialect.URI().DBType == schemas.MSSQL && (col.SQLType.Name == schemas.Text || + col.SQLType.IsBlob() || col.SQLType.Name == schemas.TimeStampz) { + continue + } + if col.SQLType.IsJson() { + continue + } + + var colName string + if addedTableName { + var nm = tableName + if len(aliasName) > 0 { + nm = aliasName + } + colName = statement.quote(nm) + "." + statement.quote(col.Name) + } else { + colName = statement.quote(col.Name) + } + + fieldValuePtr, err := col.ValueOf(bean) + if err != nil { + if !strings.Contains(err.Error(), "is not valid") { + //engine.logger.Warn(err) + } + continue + } + + if col.IsDeleted && !unscoped { // tag "deleted" is enabled + conds = append(conds, statement.CondDeleted(col)) + } + + fieldValue := *fieldValuePtr + if fieldValue.Interface() == nil { + continue + } + + fieldType := reflect.TypeOf(fieldValue.Interface()) + requiredField := useAllCols + + if b, ok := getFlagForColumn(mustColumnMap, col); ok { + if b { + requiredField = true + } else { + continue + } + } + + if fieldType.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + if includeNil { + conds = append(conds, builder.Eq{colName: nil}) + } + continue + } else if !fieldValue.IsValid() { + continue + } else { + // dereference ptr type to instance type + fieldValue = fieldValue.Elem() + fieldType = reflect.TypeOf(fieldValue.Interface()) + requiredField = true + } + } + + var val interface{} + switch fieldType.Kind() { + case reflect.Bool: + if allUseBool || requiredField { + val = fieldValue.Interface() + } else { + // if a bool in a struct, it will not be as a condition because it default is false, + // please use Where() instead + continue + } + case reflect.String: + if !requiredField && fieldValue.String() == "" { + continue + } + // for MyString, should convert to string or panic + if fieldType.String() != reflect.String.String() { + val = fieldValue.String() + } else { + val = fieldValue.Interface() + } + case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64: + if !requiredField && fieldValue.Int() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Float32, reflect.Float64: + if !requiredField && fieldValue.Float() == 0.0 { + continue + } + val = fieldValue.Interface() + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + if !requiredField && fieldValue.Uint() == 0 { + continue + } + val = fieldValue.Interface() + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time) + if !requiredField && (t.IsZero() || !fieldValue.IsValid()) { + continue + } + val = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t) + } else if _, ok := reflect.New(fieldType).Interface().(convert.Conversion); ok { + continue + } else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok { + val, _ = valNul.Value() + if val == nil && !requiredField { + continue + } + } else { + if col.SQLType.IsJson() { + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = bytes + } + } else { + table, err := statement.tagParser.ParseWithCache(fieldValue) + if err != nil { + val = fieldValue.Interface() + } else { + if len(table.PrimaryKeys) == 1 { + 
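+							// Sub-struct with a single primary key: its PK value becomes
+							// the condition value; a zero PK adds no condition.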
pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName) + // fix non-int pk issues + //if pkField.Int() != 0 { + if pkField.IsValid() && !utils.IsZero(pkField.Interface()) { + val = pkField.Interface() + } else { + continue + } + } else { + //TODO: how to handler? + return nil, fmt.Errorf("not supported %v as %v", fieldValue.Interface(), table.PrimaryKeys) + } + } + } + } + case reflect.Array: + continue + case reflect.Slice, reflect.Map: + if fieldValue == reflect.Zero(fieldType) { + continue + } + if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 { + continue + } + + if col.SQLType.IsText() { + bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = string(bytes) + } else if col.SQLType.IsBlob() { + var bytes []byte + var err error + if (fieldType.Kind() == reflect.Array || fieldType.Kind() == reflect.Slice) && + fieldType.Elem().Kind() == reflect.Uint8 { + if fieldValue.Len() > 0 { + val = fieldValue.Bytes() + } else { + continue + } + } else { + bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface()) + if err != nil { + return nil, err + } + val = bytes + } + } else { + continue + } + default: + val = fieldValue.Interface() + } + + conds = append(conds, builder.Eq{colName: val}) + } + + return builder.And(conds...), nil +} + +func (statement *Statement) BuildConds(table *schemas.Table, bean interface{}, includeVersion bool, includeUpdated bool, includeNil bool, includeAutoIncr bool, addedTableName bool) (builder.Cond, error) { + return statement.buildConds2(table, bean, includeVersion, includeUpdated, includeNil, includeAutoIncr, statement.allUseBool, statement.useAllCols, + statement.unscoped, statement.MustColumnMap, statement.TableName(), statement.TableAlias, addedTableName) +} + +func (statement *Statement) mergeConds(bean interface{}) error { + if !statement.NoAutoCondition && statement.RefTable != nil { + var addedTableName = (len(statement.JoinStr) > 0) + autoCond, err := statement.BuildConds(statement.RefTable, bean, true, true, false, true, addedTableName) + if err != nil { + return err + } + statement.cond = statement.cond.And(autoCond) + } + + if err := statement.ProcessIDParam(); err != nil { + return err + } + return nil +} + +func (statement *Statement) GenConds(bean interface{}) (string, []interface{}, error) { + if err := statement.mergeConds(bean); err != nil { + return "", nil, err + } + + return statement.GenCondSQL(statement.cond) +} + +func (statement *Statement) quoteColumnStr(columnStr string) string { + columns := strings.Split(columnStr, ",") + return statement.dialect.Quoter().Join(columns, ",") +} + +func (statement *Statement) ConvertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + sql, args, err := convertSQLOrArgs(sqlOrArgs...) 
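+	// convertSQLOrArgs accepts a raw SQL string or a builder; on success the
+	// generated SQL still needs its quotes normalized for the active dialect.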
+ if err != nil { + return "", nil, err + } + return statement.ReplaceQuote(sql), args, nil +} + +func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + switch sqlOrArgs[0].(type) { + case string: + return sqlOrArgs[0].(string), sqlOrArgs[1:], nil + case *builder.Builder: + return sqlOrArgs[0].(*builder.Builder).ToSQL() + case builder.Builder: + bd := sqlOrArgs[0].(builder.Builder) + return bd.ToSQL() + } + + return "", nil, ErrUnSupportedType +} + +func (statement *Statement) joinColumns(cols []*schemas.Column, includeTableName bool) string { + var colnames = make([]string, len(cols)) + for i, col := range cols { + if includeTableName { + colnames[i] = statement.quote(statement.TableName()) + + "." + statement.quote(col.Name) + } else { + colnames[i] = statement.quote(col.Name) + } + } + return strings.Join(colnames, ", ") +} + +// CondDeleted returns the conditions whether a record is soft deleted. +func (statement *Statement) CondDeleted(col *schemas.Column) builder.Cond { + var colName = col.Name + if statement.JoinStr != "" { + var prefix string + if statement.TableAlias != "" { + prefix = statement.TableAlias + } else { + prefix = statement.TableName() + } + colName = statement.quote(prefix) + "." + statement.quote(col.Name) + } + var cond = builder.NewCond() + if col.SQLType.IsNumeric() { + cond = builder.Eq{colName: 0} + } else { + // FIXME: mssql: The conversion of a nvarchar data type to a datetime data type resulted in an out-of-range value. + if statement.dialect.URI().DBType != schemas.MSSQL { + cond = builder.Eq{colName: utils.ZeroTime1} + } + } + + if col.Nullable { + cond = cond.Or(builder.IsNull{colName}) + } + + return cond +} diff --git a/vendor/xorm.io/xorm/internal/statements/statement_args.go b/vendor/xorm.io/xorm/internal/statements/statement_args.go new file mode 100644 index 0000000000..dc14467d71 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/statement_args.go @@ -0,0 +1,132 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
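+// Illustrative note (not part of upstream xorm): WriteArg below normally emits
+// a '?' placeholder and appends the value to the writer's args, so WriteArgs
+// over ["a", 1] yields the fragment "?,?" with the two values carried along
+// for the driver; *builder.Builder args are inlined as parenthesized sub-SQL.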
+ +package statements + +import ( + "fmt" + "reflect" + "strings" + "time" + + "xorm.io/builder" + "xorm.io/xorm/schemas" +) + +func quoteNeeded(a interface{}) bool { + switch a.(type) { + case int, int8, int16, int32, int64: + return false + case uint, uint8, uint16, uint32, uint64: + return false + case float32, float64: + return false + case bool: + return false + case string: + return true + case time.Time, *time.Time: + return true + case builder.Builder, *builder.Builder: + return false + } + + t := reflect.TypeOf(a) + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return false + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return false + case reflect.Float32, reflect.Float64: + return false + case reflect.Bool: + return false + case reflect.String: + return true + } + + return true +} + +func convertStringSingleQuote(arg string) string { + return "'" + strings.Replace(arg, "'", "''", -1) + "'" +} + +func convertString(arg string) string { + var buf strings.Builder + buf.WriteRune('\'') + for _, c := range arg { + if c == '\\' || c == '\'' { + buf.WriteRune('\\') + } + buf.WriteRune(c) + } + buf.WriteRune('\'') + return buf.String() +} + +func convertArg(arg interface{}, convertFunc func(string) string) string { + if quoteNeeded(arg) { + argv := fmt.Sprintf("%v", arg) + return convertFunc(argv) + } + + return fmt.Sprintf("%v", arg) +} + +const insertSelectPlaceHolder = true + +func (statement *Statement) WriteArg(w *builder.BytesWriter, arg interface{}) error { + switch argv := arg.(type) { + case *builder.Builder: + if _, err := w.WriteString("("); err != nil { + return err + } + if err := argv.WriteTo(w); err != nil { + return err + } + if _, err := w.WriteString(")"); err != nil { + return err + } + default: + if insertSelectPlaceHolder { + if err := w.WriteByte('?'); err != nil { + return err + } + if v, ok := arg.(bool); ok && statement.dialect.URI().DBType == schemas.MSSQL { + if v { + w.Append(1) + } else { + w.Append(0) + } + } else { + w.Append(arg) + } + } else { + var convertFunc = convertStringSingleQuote + if statement.dialect.URI().DBType == schemas.MYSQL { + convertFunc = convertString + } + if _, err := w.WriteString(convertArg(arg, convertFunc)); err != nil { + return err + } + } + } + return nil +} + +func (statement *Statement) WriteArgs(w *builder.BytesWriter, args []interface{}) error { + for i, arg := range args { + if err := statement.WriteArg(w, arg); err != nil { + return err + } + + if i+1 != len(args) { + if _, err := w.WriteString(","); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/xorm.io/xorm/internal/statements/update.go b/vendor/xorm.io/xorm/internal/statements/update.go new file mode 100644 index 0000000000..b6ae118e0f --- /dev/null +++ b/vendor/xorm.io/xorm/internal/statements/update.go @@ -0,0 +1,294 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
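+// Illustrative note (not part of upstream xorm): BuildUpdates below returns a
+// pair such as (["`a` = ?", "`b` = ?"], [1, "x"]); callers join the first
+// slice with ", " to form the SET clause of an UPDATE statement.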
+
+package statements
+
+import (
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"xorm.io/xorm/convert"
+	"xorm.io/xorm/dialects"
+	"xorm.io/xorm/internal/json"
+	"xorm.io/xorm/internal/utils"
+	"xorm.io/xorm/schemas"
+)
+
+func (statement *Statement) ifAddColUpdate(col *schemas.Column, includeVersion, includeUpdated, includeNil,
+	includeAutoIncr, update bool) (bool, error) {
+	columnMap := statement.ColumnMap
+	omitColumnMap := statement.OmitColumnMap
+	unscoped := statement.unscoped
+
+	if !includeVersion && col.IsVersion {
+		return false, nil
+	}
+	if col.IsCreated && !columnMap.Contain(col.Name) {
+		return false, nil
+	}
+	if !includeUpdated && col.IsUpdated {
+		return false, nil
+	}
+	if !includeAutoIncr && col.IsAutoIncrement {
+		return false, nil
+	}
+	if col.IsDeleted && !unscoped {
+		return false, nil
+	}
+	if omitColumnMap.Contain(col.Name) {
+		return false, nil
+	}
+	if len(columnMap) > 0 && !columnMap.Contain(col.Name) {
+		return false, nil
+	}
+
+	if col.MapType == schemas.ONLYFROMDB {
+		return false, nil
+	}
+
+	if statement.IncrColumns.IsColExist(col.Name) {
+		return false, nil
+	} else if statement.DecrColumns.IsColExist(col.Name) {
+		return false, nil
+	} else if statement.ExprColumns.IsColExist(col.Name) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// BuildUpdates automatically generates update columns and values according to a struct
+func (statement *Statement) BuildUpdates(tableValue reflect.Value,
+	includeVersion, includeUpdated, includeNil,
+	includeAutoIncr, update bool) ([]string, []interface{}, error) {
+	table := statement.RefTable
+	allUseBool := statement.allUseBool
+	useAllCols := statement.useAllCols
+	mustColumnMap := statement.MustColumnMap
+	nullableMap := statement.NullableMap
+
+	var colNames = make([]string, 0)
+	var args = make([]interface{}, 0)
+
+	for _, col := range table.Columns() {
+		ok, err := statement.ifAddColUpdate(col, includeVersion, includeUpdated, includeNil,
+			includeAutoIncr, update)
+		if err != nil {
+			return nil, nil, err
+		}
+		if !ok {
+			continue
+		}
+
+		fieldValuePtr, err := col.ValueOfV(&tableValue)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		fieldValue := *fieldValuePtr
+		fieldType := reflect.TypeOf(fieldValue.Interface())
+		if fieldType == nil {
+			continue
+		}
+
+		requiredField := useAllCols
+		includeNil := useAllCols
+
+		if b, ok := getFlagForColumn(mustColumnMap, col); ok {
+			if b {
+				requiredField = true
+			} else {
+				continue
+			}
+		}
+
+		// !evalphobia! set fieldValue as nil when column is nullable and zero-value
+		if b, ok := getFlagForColumn(nullableMap, col); ok {
+			if b && col.Nullable && utils.IsZero(fieldValue.Interface()) {
+				var nilValue *int
+				fieldValue = reflect.ValueOf(nilValue)
+				fieldType = reflect.TypeOf(fieldValue.Interface())
+				includeNil = true
+			}
+		}
+
+		var val interface{}
+
+		if fieldValue.CanAddr() {
+			if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
+				data, err := structConvert.ToDB()
+				if err != nil {
+					return nil, nil, err
+				}
+
+				val = data
+				goto APPEND
+			}
+		}
+
+		if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok {
+			data, err := structConvert.ToDB()
+			if err != nil {
+				return nil, nil, err
+			}
+
+			val = data
+			goto APPEND
+		}
+
+		if fieldType.Kind() == reflect.Ptr {
+			if fieldValue.IsNil() {
+				if includeNil {
+					args = append(args, nil)
+					colNames = append(colNames, fmt.Sprintf("%v=?", statement.quote(col.Name)))
+				}
+				continue
+			} else if !fieldValue.IsValid() {
+				continue
+			} else {
+				// dereference ptr type to instance type
+				fieldValue = fieldValue.Elem()
+				fieldType = reflect.TypeOf(fieldValue.Interface())
+				requiredField = true
+			}
+		}
+
+		switch fieldType.Kind() {
+		case reflect.Bool:
+			if allUseBool || requiredField {
+				val = fieldValue.Interface()
+			} else {
+				// a bool field in a struct will not be used as a condition because its
+				// default is false; please use Where() instead
+				continue
+			}
+		case reflect.String:
+			if !requiredField && fieldValue.String() == "" {
+				continue
+			}
+			// for MyString, should convert to string or panic
+			if fieldType.String() != reflect.String.String() {
+				val = fieldValue.String()
+			} else {
+				val = fieldValue.Interface()
+			}
+		case reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64:
+			if !requiredField && fieldValue.Int() == 0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Float32, reflect.Float64:
+			if !requiredField && fieldValue.Float() == 0.0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64:
+			if !requiredField && fieldValue.Uint() == 0 {
+				continue
+			}
+			val = fieldValue.Interface()
+		case reflect.Struct:
+			if fieldType.ConvertibleTo(schemas.TimeType) {
+				t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time)
+				if !requiredField && (t.IsZero() || !fieldValue.IsValid()) {
+					continue
+				}
+				val = dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t)
+			} else if nulType, ok := fieldValue.Interface().(driver.Valuer); ok {
+				val, _ = nulType.Value()
+				if val == nil && !requiredField {
+					continue
+				}
+			} else {
+				if !col.SQLType.IsJson() {
+					table, err := statement.tagParser.ParseWithCache(fieldValue)
+					if err != nil {
+						val = fieldValue.Interface()
+					} else {
+						if len(table.PrimaryKeys) == 1 {
+							pkField := reflect.Indirect(fieldValue).FieldByName(table.PKColumns()[0].FieldName)
+							// fix non-int pk issues
+							if pkField.IsValid() && (!requiredField && !utils.IsZero(pkField.Interface())) {
+								val = pkField.Interface()
+							} else {
+								continue
+							}
+						} else {
+							return nil, nil, errors.New("Not supported multiple primary keys")
+						}
+					}
+				} else {
+					// A blank struct cannot be used as update data
+					if requiredField || !utils.IsStructZero(fieldValue) {
+						bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+						if err != nil {
+							return nil, nil, fmt.Errorf("marshal %v failed", fieldValue.Interface())
+						}
+						if col.SQLType.IsText() {
+							val = string(bytes)
+						} else if col.SQLType.IsBlob() {
+							val = bytes
+						}
+					} else {
+						continue
+					}
+				}
+			}
+		case reflect.Array, reflect.Slice, reflect.Map:
+			if !requiredField {
+				if fieldValue == reflect.Zero(fieldType) {
+					continue
+				}
+				if fieldType.Kind() == reflect.Array {
+					if utils.IsArrayZero(fieldValue) {
+						continue
+					}
+				} else if fieldValue.IsNil() || !fieldValue.IsValid() || fieldValue.Len() == 0 {
+					continue
+				}
+			}
+
+			if col.SQLType.IsText() {
+				bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+				if err != nil {
+					return nil, nil, err
+				}
+				val = string(bytes)
+			} else if col.SQLType.IsBlob() {
+				var bytes []byte
+				var err error
+				if fieldType.Kind() == reflect.Slice &&
+					fieldType.Elem().Kind() == reflect.Uint8 {
+					if fieldValue.Len() > 0 {
+						val = fieldValue.Bytes()
+					} else {
+						continue
+					}
+				} else if fieldType.Kind() == reflect.Array &&
+					fieldType.Elem().Kind() == reflect.Uint8 {
+					val = fieldValue.Slice(0, 0).Interface()
+				} else {
+					bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+					if err != nil {
+						return nil, nil, err
+					}
+					val = bytes
+				}
+			} else {
+				continue
+			}
+		default:
+			val = fieldValue.Interface()
+		}
+
+	APPEND:
+		args = append(args, val)
+		colNames = append(colNames, fmt.Sprintf("%v = ?", statement.quote(col.Name)))
+	}
+
+	return colNames, args, nil
+}
diff --git a/vendor/xorm.io/xorm/internal/statements/values.go b/vendor/xorm.io/xorm/internal/statements/values.go
new file mode 100644
index 0000000000..a1102c54a2
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/statements/values.go
@@ -0,0 +1,154 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"reflect"
+	"time"
+
+	"xorm.io/xorm/convert"
+	"xorm.io/xorm/dialects"
+	"xorm.io/xorm/internal/json"
+	"xorm.io/xorm/schemas"
+)
+
+var (
+	nullFloatType = reflect.TypeOf(sql.NullFloat64{})
+)
+
+// Value2Interface converts a struct's field value to an interface for putting into the database
+func (statement *Statement) Value2Interface(col *schemas.Column, fieldValue reflect.Value) (interface{}, error) {
+	if fieldValue.CanAddr() {
+		if fieldConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
+			data, err := fieldConvert.ToDB()
+			if err != nil {
+				return nil, err
+			}
+			if col.SQLType.IsBlob() {
+				return data, nil
+			}
+			return string(data), nil
+		}
+	}
+
+	if fieldConvert, ok := fieldValue.Interface().(convert.Conversion); ok {
+		data, err := fieldConvert.ToDB()
+		if err != nil {
+			return nil, err
+		}
+		if col.SQLType.IsBlob() {
+			return data, nil
+		}
+		if nil == data {
+			return nil, nil
+		}
+		return string(data), nil
+	}
+
+	fieldType := fieldValue.Type()
+	k := fieldType.Kind()
+	if k == reflect.Ptr {
+		if fieldValue.IsNil() {
+			return nil, nil
+		} else if !fieldValue.IsValid() {
+			return nil, nil
+		} else {
+			// !nashtsai! dereference pointer type to instance type
+			fieldValue = fieldValue.Elem()
+			fieldType = fieldValue.Type()
+			k = fieldType.Kind()
+		}
+	}
+
+	switch k {
+	case reflect.Bool:
+		return fieldValue.Bool(), nil
+	case reflect.String:
+		return fieldValue.String(), nil
+	case reflect.Struct:
+		if fieldType.ConvertibleTo(schemas.TimeType) {
+			t := fieldValue.Convert(schemas.TimeType).Interface().(time.Time)
+			tf := dialects.FormatColumnTime(statement.dialect, statement.defaultTimeZone, col, t)
+			return tf, nil
+		} else if fieldType.ConvertibleTo(nullFloatType) {
+			t := fieldValue.Convert(nullFloatType).Interface().(sql.NullFloat64)
+			if !t.Valid {
+				return nil, nil
+			}
+			return t.Float64, nil
+		}
+
+		if !col.SQLType.IsJson() {
+			// !! added support for structs implementing the driver.Valuer interface, e.g. sql.NullString
+			if v, ok := fieldValue.Interface().(driver.Valuer); ok {
+				return v.Value()
+			}
+
+			fieldTable, err := statement.tagParser.ParseWithCache(fieldValue)
+			if err != nil {
+				return nil, err
+			}
+			if len(fieldTable.PrimaryKeys) == 1 {
+				pkField := reflect.Indirect(fieldValue).FieldByName(fieldTable.PKColumns()[0].FieldName)
+				return pkField.Interface(), nil
+			}
+			return nil, fmt.Errorf("no primary key for col %v", col.Name)
+		}
+
+		if col.SQLType.IsText() {
+			bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+			if err != nil {
+				return nil, err
+			}
+			return string(bytes), nil
+		} else if col.SQLType.IsBlob() {
+			bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+			if err != nil {
+				return nil, err
+			}
+			return bytes, nil
+		}
+		return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type())
+	case reflect.Complex64, reflect.Complex128:
+		bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+		if err != nil {
+			return nil, err
+		}
+		return string(bytes), nil
+	case reflect.Array, reflect.Slice, reflect.Map:
+		if !fieldValue.IsValid() {
+			return fieldValue.Interface(), nil
+		}
+
+		if col.SQLType.IsText() {
+			bytes, err := json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+			if err != nil {
+				return nil, err
+			}
+			return string(bytes), nil
+		} else if col.SQLType.IsBlob() {
+			var bytes []byte
+			var err error
+			if (k == reflect.Slice) &&
+				(fieldValue.Type().Elem().Kind() == reflect.Uint8) {
+				bytes = fieldValue.Bytes()
+			} else {
+				bytes, err = json.DefaultJSONHandler.Marshal(fieldValue.Interface())
+				if err != nil {
+					return nil, err
+				}
+			}
+			return bytes, nil
+		}
+		return nil, ErrUnSupportedType
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return fieldValue.Uint(), nil
+	default:
+		return fieldValue.Interface(), nil
+	}
+}
diff --git a/vendor/xorm.io/xorm/internal/utils/name.go b/vendor/xorm.io/xorm/internal/utils/name.go
new file mode 100644
index 0000000000..f5fc3ff78c
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/utils/name.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package utils
+
+import (
+	"fmt"
+)
+
+func IndexName(tableName, idxName string) string {
+	return fmt.Sprintf("IDX_%v_%v", tableName, idxName)
+}
diff --git a/vendor/xorm.io/xorm/internal/utils/reflect.go b/vendor/xorm.io/xorm/internal/utils/reflect.go
new file mode 100644
index 0000000000..3dad6bfe03
--- /dev/null
+++ b/vendor/xorm.io/xorm/internal/utils/reflect.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "reflect" +) + +func ReflectValue(bean interface{}) reflect.Value { + return reflect.Indirect(reflect.ValueOf(bean)) +} diff --git a/vendor/xorm.io/xorm/internal/utils/slice.go b/vendor/xorm.io/xorm/internal/utils/slice.go new file mode 100644 index 0000000000..89685706db --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/slice.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "sort" + +// SliceEq return true if two slice have the same elements even if different sort. +func SliceEq(left, right []string) bool { + if len(left) != len(right) { + return false + } + sort.Sort(sort.StringSlice(left)) + sort.Sort(sort.StringSlice(right)) + for i := 0; i < len(left); i++ { + if left[i] != right[i] { + return false + } + } + return true +} diff --git a/vendor/xorm.io/xorm/internal/utils/sql.go b/vendor/xorm.io/xorm/internal/utils/sql.go new file mode 100644 index 0000000000..5e68c4a46a --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/sql.go @@ -0,0 +1,19 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "strings" +) + +func IsSubQuery(tbName string) bool { + const selStr = "select" + if len(tbName) <= len(selStr)+1 { + return false + } + + return strings.EqualFold(tbName[:len(selStr)], selStr) || + strings.EqualFold(tbName[:len(selStr)+1], "("+selStr) +} diff --git a/vendor/xorm.io/xorm/internal/utils/strings.go b/vendor/xorm.io/xorm/internal/utils/strings.go new file mode 100644 index 0000000000..b5dc37b774 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/strings.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import ( + "strings" +) + +func IndexNoCase(s, sep string) int { + return strings.Index(strings.ToLower(s), strings.ToLower(sep)) +} + +func SplitNoCase(s, sep string) []string { + idx := IndexNoCase(s, sep) + if idx < 0 { + return []string{s} + } + return strings.Split(s, s[idx:idx+len(sep)]) +} + +func SplitNNoCase(s, sep string, n int) []string { + idx := IndexNoCase(s, sep) + if idx < 0 { + return []string{s} + } + return strings.SplitN(s, s[idx:idx+len(sep)], n) +} + diff --git a/vendor/xorm.io/xorm/internal/utils/zero.go b/vendor/xorm.io/xorm/internal/utils/zero.go new file mode 100644 index 0000000000..8f033c60b8 --- /dev/null +++ b/vendor/xorm.io/xorm/internal/utils/zero.go @@ -0,0 +1,145 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
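+// Illustrative note (not part of upstream xorm): IsZero below reports nil,
+// 0, "", false, nil/zero time values and zero Zeroable implementations as
+// zero, e.g. IsZero(0) == true and IsZero("x") == false.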
+
+package utils
+
+import (
+	"reflect"
+	"time"
+)
+
+type Zeroable interface {
+	IsZero() bool
+}
+
+var nilTime *time.Time
+
+// IsZero returns true if k is nil or has a zero value
+func IsZero(k interface{}) bool {
+	if k == nil {
+		return true
+	}
+
+	switch k.(type) {
+	case int:
+		return k.(int) == 0
+	case int8:
+		return k.(int8) == 0
+	case int16:
+		return k.(int16) == 0
+	case int32:
+		return k.(int32) == 0
+	case int64:
+		return k.(int64) == 0
+	case uint:
+		return k.(uint) == 0
+	case uint8:
+		return k.(uint8) == 0
+	case uint16:
+		return k.(uint16) == 0
+	case uint32:
+		return k.(uint32) == 0
+	case uint64:
+		return k.(uint64) == 0
+	case float32:
+		return k.(float32) == 0
+	case float64:
+		return k.(float64) == 0
+	case bool:
+		return k.(bool) == false
+	case string:
+		return k.(string) == ""
+	case *time.Time:
+		return k.(*time.Time) == nilTime || IsTimeZero(*k.(*time.Time))
+	case time.Time:
+		return IsTimeZero(k.(time.Time))
+	case Zeroable:
+		return k.(Zeroable) == nil || k.(Zeroable).IsZero()
+	case reflect.Value: // for go version less than 1.13 because reflect.Value has no method IsZero
+		return IsValueZero(k.(reflect.Value))
+	}
+
+	return IsValueZero(reflect.ValueOf(k))
+}
+
+var zeroType = reflect.TypeOf((*Zeroable)(nil)).Elem()
+
+func IsValueZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.String:
+		return v.Len() == 0
+	case reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return IsValueZero(v.Elem())
+	case reflect.Struct:
+		return IsStructZero(v)
+	case reflect.Array:
+		return IsArrayZero(v)
+	}
+	return false
+}
+
+func IsStructZero(v reflect.Value) bool {
+	if !v.IsValid() || v.NumField() == 0 {
+		return true
+	}
+
+	if v.Type().Implements(zeroType) {
+		f := v.MethodByName("IsZero")
+		if f.IsValid() {
+			res := f.Call(nil)
+			return len(res) == 1 && res[0].Bool()
+		}
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		switch field.Kind() {
+		case reflect.Ptr:
+			field = field.Elem()
+			fallthrough
+		case reflect.Struct:
+			if !IsStructZero(field) {
+				return false
+			}
+		default:
+			if field.CanInterface() && !IsZero(field.Interface()) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func IsArrayZero(v reflect.Value) bool {
+	if !v.IsValid() || v.Len() == 0 {
+		return true
+	}
+
+	for i := 0; i < v.Len(); i++ {
+		if !IsZero(v.Index(i).Interface()) {
+			return false
+		}
+	}
+
+	return true
+}
+
+const (
+	ZeroTime0 = "0000-00-00 00:00:00"
+	ZeroTime1 = "0001-01-01 00:00:00"
+)
+
+func IsTimeZero(t time.Time) bool {
+	return t.IsZero() || t.Format("2006-01-02 15:04:05") == ZeroTime0 ||
+		t.Format("2006-01-02 15:04:05") == ZeroTime1
+}
diff --git a/vendor/xorm.io/xorm/log/logger.go b/vendor/xorm.io/xorm/log/logger.go
new file mode 100644
index 0000000000..eeb63693b2
--- /dev/null
+++ b/vendor/xorm.io/xorm/log/logger.go
@@ -0,0 +1,217 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"fmt"
+	"io"
+	"log"
+)
+
+// LogLevel defines a log level
+type LogLevel int
+
+// enumerate all LogLevels
+const (
+	// !nashtsai! the following levels also match syslog.Priority values
+	LOG_DEBUG LogLevel = iota
+	LOG_INFO
+	LOG_WARNING
+	LOG_ERR
+	LOG_OFF
+	LOG_UNKNOWN
+)
+
+// default log options
+const (
+	DEFAULT_LOG_PREFIX = "[xorm]"
+	DEFAULT_LOG_FLAG   = log.Ldate | log.Lmicroseconds
+	DEFAULT_LOG_LEVEL  = LOG_DEBUG
+)
+
+// Logger is a logger interface
+type Logger interface {
+	Debug(v ...interface{})
+	Debugf(format string, v ...interface{})
+	Error(v ...interface{})
+	Errorf(format string, v ...interface{})
+	Info(v ...interface{})
+	Infof(format string, v ...interface{})
+	Warn(v ...interface{})
+	Warnf(format string, v ...interface{})
+
+	Level() LogLevel
+	SetLevel(l LogLevel)
+
+	ShowSQL(show ...bool)
+	IsShowSQL() bool
+}
+
+var _ Logger = DiscardLogger{}
+
+// DiscardLogger is a no-op Logger implementation
+type DiscardLogger struct{}
+
+// Debug empty implementation
+func (DiscardLogger) Debug(v ...interface{}) {}
+
+// Debugf empty implementation
+func (DiscardLogger) Debugf(format string, v ...interface{}) {}
+
+// Error empty implementation
+func (DiscardLogger) Error(v ...interface{}) {}
+
+// Errorf empty implementation
+func (DiscardLogger) Errorf(format string, v ...interface{}) {}
+
+// Info empty implementation
+func (DiscardLogger) Info(v ...interface{}) {}
+
+// Infof empty implementation
+func (DiscardLogger) Infof(format string, v ...interface{}) {}
+
+// Warn empty implementation
+func (DiscardLogger) Warn(v ...interface{}) {}
+
+// Warnf empty implementation
+func (DiscardLogger) Warnf(format string, v ...interface{}) {}
+
+// Level empty implementation
+func (DiscardLogger) Level() LogLevel {
+	return LOG_UNKNOWN
+}
+
+// SetLevel empty implementation
+func (DiscardLogger) SetLevel(l LogLevel) {}
+
+// ShowSQL empty implementation
+func (DiscardLogger) ShowSQL(show ...bool) {}
+
+// IsShowSQL empty implementation
+func (DiscardLogger) IsShowSQL() bool {
+	return false
+}
+
+// SimpleLogger is the default implementation of Logger
+type SimpleLogger struct {
+	DEBUG   *log.Logger
+	ERR     *log.Logger
+	INFO    *log.Logger
+	WARN    *log.Logger
+	level   LogLevel
+	showSQL bool
+}
+
+var _ Logger = &SimpleLogger{}
+
+// NewSimpleLogger uses a special io.Writer as the logger output
+func NewSimpleLogger(out io.Writer) *SimpleLogger {
+	return NewSimpleLogger2(out, DEFAULT_LOG_PREFIX, DEFAULT_LOG_FLAG)
+}
+
+// NewSimpleLogger2 lets you customize your logger prefix and flag
+func NewSimpleLogger2(out io.Writer, prefix string, flag int) *SimpleLogger {
+	return NewSimpleLogger3(out, prefix, flag, DEFAULT_LOG_LEVEL)
+}
+
+// NewSimpleLogger3 lets you customize your logger prefix, flag and log level
+func NewSimpleLogger3(out io.Writer, prefix string, flag int, l LogLevel) *SimpleLogger {
+	return &SimpleLogger{
+		DEBUG: log.New(out, fmt.Sprintf("%s [debug] ", prefix), flag),
+		ERR:   log.New(out, fmt.Sprintf("%s [error] ", prefix), flag),
+		INFO:  log.New(out, fmt.Sprintf("%s [info] ", prefix), flag),
+		WARN:  log.New(out, fmt.Sprintf("%s [warn] ", prefix), flag),
+		level: l,
+	}
+}
+
+// Error implements Logger
+func (s *SimpleLogger) Error(v ...interface{}) {
+	if s.level <= LOG_ERR {
+		s.ERR.Output(2, fmt.Sprintln(v...))
+	}
+	return
+}
+
+// Errorf implements Logger
+func (s *SimpleLogger) Errorf(format string, v ...interface{}) {
+	if s.level <= LOG_ERR {
+		s.ERR.Output(2, fmt.Sprintf(format, v...))
+	}
+	return
+}
+
+// Debug implements Logger
+func (s *SimpleLogger) Debug(v ...interface{}) {
+	if s.level <= LOG_DEBUG {
+		s.DEBUG.Output(2, fmt.Sprintln(v...))
+	}
+	return
+}
+
+// Debugf implements Logger
+func (s *SimpleLogger) Debugf(format string, v ...interface{}) {
+	if s.level <= LOG_DEBUG {
+		s.DEBUG.Output(2, fmt.Sprintf(format, v...))
+	}
+	return
+}
+
+// Info implements Logger
+func (s *SimpleLogger) Info(v ...interface{}) {
+	if s.level <= LOG_INFO {
+		s.INFO.Output(2, fmt.Sprintln(v...))
+	}
+	return
+}
+
+// Infof implements Logger
+func (s *SimpleLogger) Infof(format string, v ...interface{}) {
+	if s.level <= LOG_INFO {
+		s.INFO.Output(2, fmt.Sprintf(format, v...))
+	}
+	return
+}
+
+// Warn implements Logger
+func (s *SimpleLogger) Warn(v ...interface{}) {
+	if s.level <= LOG_WARNING {
+		s.WARN.Output(2, fmt.Sprintln(v...))
+	}
+	return
+}
+
+// Warnf implements Logger
+func (s *SimpleLogger) Warnf(format string, v ...interface{}) {
+	if s.level <= LOG_WARNING {
+		s.WARN.Output(2, fmt.Sprintf(format, v...))
+	}
+	return
+}
+
+// Level implements Logger
+func (s *SimpleLogger) Level() LogLevel {
+	return s.level
+}
+
+// SetLevel implements Logger
+func (s *SimpleLogger) SetLevel(l LogLevel) {
+	s.level = l
+	return
+}
+
+// ShowSQL implements Logger
+func (s *SimpleLogger) ShowSQL(show ...bool) {
+	if len(show) == 0 {
+		s.showSQL = true
+		return
+	}
+	s.showSQL = show[0]
+}
+
+// IsShowSQL implements Logger
+func (s *SimpleLogger) IsShowSQL() bool {
+	return s.showSQL
+}
diff --git a/vendor/xorm.io/xorm/log/logger_context.go b/vendor/xorm.io/xorm/log/logger_context.go
new file mode 100644
index 0000000000..6b7252ef64
--- /dev/null
+++ b/vendor/xorm.io/xorm/log/logger_context.go
@@ -0,0 +1,115 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+	"fmt"
+
+	"xorm.io/xorm/contexts"
+)
+
+// LogContext represents a log context
+type LogContext contexts.ContextHook
+
+// SQLLogger represents an interface to log SQL
+type SQLLogger interface {
+	BeforeSQL(context LogContext) // only invoked when IsShowSQL is true
+	AfterSQL(context LogContext)  // only invoked when IsShowSQL is true
+}
+
+// ContextLogger represents a logger interface with context
+type ContextLogger interface {
+	SQLLogger
+
+	Debugf(format string, v ...interface{})
+	Errorf(format string, v ...interface{})
+	Infof(format string, v ...interface{})
+	Warnf(format string, v ...interface{})
+
+	Level() LogLevel
+	SetLevel(l LogLevel)
+
+	ShowSQL(show ...bool)
+	IsShowSQL() bool
+}
+
+var (
+	_ ContextLogger = &LoggerAdapter{}
+)
+
+// enumerate all the context keys
+var (
+	SessionIDKey      = "__xorm_session_id"
+	SessionShowSQLKey = "__xorm_show_sql"
+)
+
+// LoggerAdapter wraps a Logger interface as a ContextLogger interface
+type LoggerAdapter struct {
+	logger Logger
+}
+
+// NewLoggerAdapter creates an adapter for the old xorm logger interface
+func NewLoggerAdapter(logger Logger) ContextLogger {
+	return &LoggerAdapter{
+		logger: logger,
+	}
+}
+
+// BeforeSQL implements ContextLogger
+func (l *LoggerAdapter) BeforeSQL(ctx LogContext) {}
+
+// AfterSQL implements ContextLogger
+func (l *LoggerAdapter) AfterSQL(ctx LogContext) {
+	var sessionPart string
+	v := ctx.Ctx.Value(SessionIDKey)
+	if key, ok := v.(string); ok {
+		sessionPart = fmt.Sprintf(" [%s]", key)
+	}
+	if ctx.ExecuteTime > 0 {
+		l.logger.Infof("[SQL]%s %s %v - %v", sessionPart, ctx.SQL, ctx.Args, ctx.ExecuteTime)
+	} else {
+		l.logger.Infof("[SQL]%s %s %v", sessionPart, ctx.SQL, ctx.Args)
+	}
+}
+
+// Debugf implements ContextLogger
+func (l *LoggerAdapter) Debugf(format string, v ...interface{}) {
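+	// Delegate to the wrapped legacy Logger; the adapter itself only adds
+	// SQL-context handling in BeforeSQL/AfterSQL.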
+	l.logger.Debugf(format, v...)
+}
+
+// Errorf implements ContextLogger
+func (l *LoggerAdapter) Errorf(format string, v ...interface{}) {
+	l.logger.Errorf(format, v...)
+}
+
+// Infof implements ContextLogger
+func (l *LoggerAdapter) Infof(format string, v ...interface{}) {
+	l.logger.Infof(format, v...)
+}
+
+// Warnf implements ContextLogger
+func (l *LoggerAdapter) Warnf(format string, v ...interface{}) {
+	l.logger.Warnf(format, v...)
+}
+
+// Level implements ContextLogger
+func (l *LoggerAdapter) Level() LogLevel {
+	return l.logger.Level()
+}
+
+// SetLevel implements ContextLogger
+func (l *LoggerAdapter) SetLevel(lv LogLevel) {
+	l.logger.SetLevel(lv)
+}
+
+// ShowSQL implements ContextLogger
+func (l *LoggerAdapter) ShowSQL(show ...bool) {
+	l.logger.ShowSQL(show...)
+}
+
+// IsShowSQL implements ContextLogger
+func (l *LoggerAdapter) IsShowSQL() bool {
+	return l.logger.IsShowSQL()
+}
diff --git a/vendor/xorm.io/xorm/log/syslogger.go b/vendor/xorm.io/xorm/log/syslogger.go
new file mode 100644
index 0000000000..0b3e381c2f
--- /dev/null
+++ b/vendor/xorm.io/xorm/log/syslogger.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!nacl,!plan9
+
+package log
+
+import (
+	"fmt"
+	"log/syslog"
+)
+
+var _ Logger = &SyslogLogger{}
+
+// SyslogLogger will be deprecated
+type SyslogLogger struct {
+	w       *syslog.Writer
+	showSQL bool
+}
+
+// NewSyslogLogger creates a SyslogLogger backed by the given syslog.Writer
+func NewSyslogLogger(w *syslog.Writer) *SyslogLogger {
+	return &SyslogLogger{w: w}
+}
+
+// Debug log content as Debug
+func (s *SyslogLogger) Debug(v ...interface{}) {
+	s.w.Debug(fmt.Sprint(v...))
+}
+
+// Debugf log content as Debug and format
+func (s *SyslogLogger) Debugf(format string, v ...interface{}) {
+	s.w.Debug(fmt.Sprintf(format, v...))
+}
+
+// Error log content as Error
+func (s *SyslogLogger) Error(v ...interface{}) {
+	s.w.Err(fmt.Sprint(v...))
+}
+
+// Errorf log content as Error and format
+func (s *SyslogLogger) Errorf(format string, v ...interface{}) {
+	s.w.Err(fmt.Sprintf(format, v...))
+}
+
+// Info log content as Info
+func (s *SyslogLogger) Info(v ...interface{}) {
+	s.w.Info(fmt.Sprint(v...))
+}
+
+// Infof log content as Info and format
+func (s *SyslogLogger) Infof(format string, v ...interface{}) {
+	s.w.Info(fmt.Sprintf(format, v...))
+}
+
+// Warn log content as Warn
+func (s *SyslogLogger) Warn(v ...interface{}) {
+	s.w.Warning(fmt.Sprint(v...))
+}
+
+// Warnf log content as Warn and format
+func (s *SyslogLogger) Warnf(format string, v ...interface{}) {
+	s.w.Warning(fmt.Sprintf(format, v...))
+}
+
+// Level shows the log level
+func (s *SyslogLogger) Level() LogLevel {
+	return LOG_UNKNOWN
+}
+
+// SetLevel is a no-op, as the current log/syslog package doesn't allow setting the priority level after the syslog.Writer is created
+func (s *SyslogLogger) SetLevel(l LogLevel) {}
+
+// ShowSQL sets whether to log SQL
+func (s *SyslogLogger) ShowSQL(show ...bool) {
+	if len(show) == 0 {
+		s.showSQL = true
+		return
+	}
+	s.showSQL = show[0]
+}
+
+// IsShowSQL reports whether SQL is logged
+func (s *SyslogLogger) IsShowSQL() bool {
+	return s.showSQL
+}
diff --git a/vendor/xorm.io/xorm/names/mapper.go b/vendor/xorm.io/xorm/names/mapper.go
new file mode 100644
index 0000000000..79add76e3c
--- /dev/null
+++ b/vendor/xorm.io/xorm/names/mapper.go
@@ -0,0 +1,265 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package names + +import ( + "strings" + "sync" + "unsafe" +) + +// Mapper represents a name convertation between struct's fields name and table's column name +type Mapper interface { + Obj2Table(string) string + Table2Obj(string) string +} + +type CacheMapper struct { + oriMapper Mapper + obj2tableCache map[string]string + obj2tableMutex sync.RWMutex + table2objCache map[string]string + table2objMutex sync.RWMutex +} + +func NewCacheMapper(mapper Mapper) *CacheMapper { + return &CacheMapper{oriMapper: mapper, obj2tableCache: make(map[string]string), + table2objCache: make(map[string]string), + } +} + +func (m *CacheMapper) Obj2Table(o string) string { + m.obj2tableMutex.RLock() + t, ok := m.obj2tableCache[o] + m.obj2tableMutex.RUnlock() + if ok { + return t + } + + t = m.oriMapper.Obj2Table(o) + m.obj2tableMutex.Lock() + m.obj2tableCache[o] = t + m.obj2tableMutex.Unlock() + return t +} + +func (m *CacheMapper) Table2Obj(t string) string { + m.table2objMutex.RLock() + o, ok := m.table2objCache[t] + m.table2objMutex.RUnlock() + if ok { + return o + } + + o = m.oriMapper.Table2Obj(t) + m.table2objMutex.Lock() + m.table2objCache[t] = o + m.table2objMutex.Unlock() + return o +} + +// SameMapper implements IMapper and provides same name between struct and +// database table +type SameMapper struct { +} + +func (m SameMapper) Obj2Table(o string) string { + return o +} + +func (m SameMapper) Table2Obj(t string) string { + return t +} + +// SnakeMapper implements IMapper and provides name transaltion between +// struct and database table +type SnakeMapper struct { +} + +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func snakeCasedName(name string) string { + newstr := make([]byte, 0, len(name)+1) + for i := 0; i < len(name); i++ { + c := name[i] + if isUpper := 'A' <= c && c <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + c += 'a' - 'A' + } + newstr = append(newstr, c) + } + + return b2s(newstr) +} + +func (mapper SnakeMapper) Obj2Table(name string) string { + return snakeCasedName(name) +} + +func titleCasedName(name string) string { + newstr := make([]byte, 0, len(name)) + upNextChar := true + + name = strings.ToLower(name) + + for i := 0; i < len(name); i++ { + c := name[i] + switch { + case upNextChar: + upNextChar = false + if 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + case c == '_': + upNextChar = true + continue + } + + newstr = append(newstr, c) + } + + return b2s(newstr) +} + +func (mapper SnakeMapper) Table2Obj(name string) string { + return titleCasedName(name) +} + +// GonicMapper implements IMapper. It will consider initialisms when mapping names. +// E.g. 
id -> ID, user -> User and to table names: UserID -> user_id, MyUID -> my_uid +type GonicMapper map[string]bool + +func isASCIIUpper(r rune) bool { + return 'A' <= r && r <= 'Z' +} + +func toASCIIUpper(r rune) rune { + if 'a' <= r && r <= 'z' { + r -= ('a' - 'A') + } + return r +} + +func gonicCasedName(name string) string { + newstr := make([]rune, 0, len(name)+3) + for idx, chr := range name { + if isASCIIUpper(chr) && idx > 0 { + if !isASCIIUpper(newstr[len(newstr)-1]) { + newstr = append(newstr, '_') + } + } + + if !isASCIIUpper(chr) && idx > 1 { + l := len(newstr) + if isASCIIUpper(newstr[l-1]) && isASCIIUpper(newstr[l-2]) { + newstr = append(newstr, newstr[l-1]) + newstr[l-1] = '_' + } + } + + newstr = append(newstr, chr) + } + return strings.ToLower(string(newstr)) +} + +func (mapper GonicMapper) Obj2Table(name string) string { + return gonicCasedName(name) +} + +func (mapper GonicMapper) Table2Obj(name string) string { + newstr := make([]rune, 0) + + name = strings.ToLower(name) + parts := strings.Split(name, "_") + + for _, p := range parts { + _, isInitialism := mapper[strings.ToUpper(p)] + for i, r := range p { + if i == 0 || isInitialism { + r = toASCIIUpper(r) + } + newstr = append(newstr, r) + } + } + + return string(newstr) +} + +// LintGonicMapper is A GonicMapper that contains a list of common initialisms taken from golang/lint +var LintGonicMapper = GonicMapper{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SSH": true, + "TLS": true, + "TTL": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} + +// PrefixMapper provides prefix table name support +type PrefixMapper struct { + Mapper Mapper + Prefix string +} + +func (mapper PrefixMapper) Obj2Table(name string) string { + return mapper.Prefix + mapper.Mapper.Obj2Table(name) +} + +func (mapper PrefixMapper) Table2Obj(name string) string { + return mapper.Mapper.Table2Obj(name[len(mapper.Prefix):]) +} + +func NewPrefixMapper(mapper Mapper, prefix string) PrefixMapper { + return PrefixMapper{mapper, prefix} +} + +// SuffixMapper provides suffix table name support +type SuffixMapper struct { + Mapper Mapper + Suffix string +} + +func (mapper SuffixMapper) Obj2Table(name string) string { + return mapper.Mapper.Obj2Table(name) + mapper.Suffix +} + +func (mapper SuffixMapper) Table2Obj(name string) string { + return mapper.Mapper.Table2Obj(name[:len(name)-len(mapper.Suffix)]) +} + +func NewSuffixMapper(mapper Mapper, suffix string) SuffixMapper { + return SuffixMapper{mapper, suffix} +} diff --git a/vendor/xorm.io/xorm/names/table_name.go b/vendor/xorm.io/xorm/names/table_name.go new file mode 100644 index 0000000000..0afb1ae397 --- /dev/null +++ b/vendor/xorm.io/xorm/names/table_name.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package names
+
+import (
+	"reflect"
+	"sync"
+)
+
+// TableName is an interface to define a customized table name
+type TableName interface {
+	TableName() string
+}
+
+var (
+	tpTableName = reflect.TypeOf((*TableName)(nil)).Elem()
+	tvCache     sync.Map
+)
+
+func GetTableName(mapper Mapper, v reflect.Value) string {
+	if v.Type().Implements(tpTableName) {
+		return v.Interface().(TableName).TableName()
+	}
+
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+		if v.Type().Implements(tpTableName) {
+			return v.Interface().(TableName).TableName()
+		}
+	} else if v.CanAddr() {
+		v1 := v.Addr()
+		if v1.Type().Implements(tpTableName) {
+			return v1.Interface().(TableName).TableName()
+		}
+	} else {
+		name, ok := tvCache.Load(v.Type())
+		if ok {
+			if name.(string) != "" {
+				return name.(string)
+			}
+		} else {
+			v2 := reflect.New(v.Type())
+			if v2.Type().Implements(tpTableName) {
+				tableName := v2.Interface().(TableName).TableName()
+				tvCache.Store(v.Type(), tableName)
+				return tableName
+			}
+
+			tvCache.Store(v.Type(), "")
+		}
+	}
+
+	return mapper.Obj2Table(v.Type().Name())
+}
diff --git a/vendor/xorm.io/xorm/processors.go b/vendor/xorm.io/xorm/processors.go
new file mode 100644
index 0000000000..8697e302ab
--- /dev/null
+++ b/vendor/xorm.io/xorm/processors.go
@@ -0,0 +1,144 @@
+// Copyright 2015 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// BeforeInsertProcessor executed before an object is initially persisted to the database
+type BeforeInsertProcessor interface {
+	BeforeInsert()
+}
+
+// BeforeUpdateProcessor executed before an object is updated
+type BeforeUpdateProcessor interface {
+	BeforeUpdate()
+}
+
+// BeforeDeleteProcessor executed before an object is deleted
+type BeforeDeleteProcessor interface {
+	BeforeDelete()
+}
+
+// BeforeSetProcessor executed before data set to the struct fields
+type BeforeSetProcessor interface {
+	BeforeSet(string, Cell)
+}
+
+// AfterSetProcessor executed after data set to the struct fields
+type AfterSetProcessor interface {
+	AfterSet(string, Cell)
+}
+
+// AfterInsertProcessor executed after an object is persisted to the database
+type AfterInsertProcessor interface {
+	AfterInsert()
+}
+
+// AfterUpdateProcessor executed after an object has been updated
+type AfterUpdateProcessor interface {
+	AfterUpdate()
+}
+
+// AfterDeleteProcessor executed after an object has been deleted
+type AfterDeleteProcessor interface {
+	AfterDelete()
+}
+
+// AfterLoadProcessor executed after an object has been loaded from database
+type AfterLoadProcessor interface {
+	AfterLoad()
+}
+
+// AfterLoadSessionProcessor executed after an object has been loaded from database with session parameter
+type AfterLoadSessionProcessor interface {
+	AfterLoad(*Session)
+}
+
+type executedProcessorFunc func(*Session, interface{}) error
+
+type executedProcessor struct {
+	fun     executedProcessorFunc
+	session *Session
+	bean    interface{}
+}
+
+func (executor *executedProcessor) execute() error {
+	return executor.fun(executor.session, executor.bean)
+}
+
+func (session *Session) executeProcessors() error {
+	processors := session.afterProcessors
+	session.afterProcessors = make([]executedProcessor, 0)
+	for _, processor := range processors {
+		if err := processor.execute(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func cleanupProcessorsClosures(slices *[]func(interface{})) {
+	if len(*slices) > 0 {
+		*slices = make([]func(interface{}), 0)
+	}
+}
+
+func
executeBeforeClosures(session *Session, bean interface{}) { + // handle before delete processors + for _, closure := range session.beforeClosures { + closure(bean) + } + cleanupProcessorsClosures(&session.beforeClosures) +} + +func executeBeforeSet(bean interface{}, fields []string, scanResults []interface{}) { + if b, hasBeforeSet := bean.(BeforeSetProcessor); hasBeforeSet { + for ii, key := range fields { + b.BeforeSet(key, Cell(scanResults[ii].(*interface{}))) + } + } +} + +func executeAfterSet(bean interface{}, fields []string, scanResults []interface{}) { + if b, hasAfterSet := bean.(AfterSetProcessor); hasAfterSet { + for ii, key := range fields { + b.AfterSet(key, Cell(scanResults[ii].(*interface{}))) + } + } +} + +func buildAfterProcessors(session *Session, bean interface{}) { + // handle afterClosures + for _, closure := range session.afterClosures { + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(sess *Session, bean interface{}) error { + closure(bean) + return nil + }, + session: session, + bean: bean, + }) + } + + if a, has := bean.(AfterLoadProcessor); has { + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(sess *Session, bean interface{}) error { + a.AfterLoad() + return nil + }, + session: session, + bean: bean, + }) + } + + if a, has := bean.(AfterLoadSessionProcessor); has { + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(sess *Session, bean interface{}) error { + a.AfterLoad(sess) + return nil + }, + session: session, + bean: bean, + }) + } +} diff --git a/vendor/xorm.io/xorm/rows.go b/vendor/xorm.io/xorm/rows.go new file mode 100644 index 0000000000..a56ea1c9e6 --- /dev/null +++ b/vendor/xorm.io/xorm/rows.go @@ -0,0 +1,158 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + + "xorm.io/builder" + "xorm.io/xorm/core" + "xorm.io/xorm/internal/utils" +) + +// Rows rows wrapper a rows to +type Rows struct { + session *Session + rows *core.Rows + beanType reflect.Type + lastError error +} + +func newRows(session *Session, bean interface{}) (*Rows, error) { + rows := new(Rows) + rows.session = session + rows.beanType = reflect.Indirect(reflect.ValueOf(bean)).Type() + + var sqlStr string + var args []interface{} + var err error + + beanValue := reflect.ValueOf(bean) + if beanValue.Kind() != reflect.Ptr { + return nil, errors.New("needs a pointer to a value") + } else if beanValue.Elem().Kind() == reflect.Ptr { + return nil, errors.New("a pointer to a pointer is not allowed") + } + + if err = rows.session.statement.SetRefBean(bean); err != nil { + return nil, err + } + + if len(session.statement.TableName()) <= 0 { + return nil, ErrTableNotFound + } + + if rows.session.statement.RawSQL == "" { + var autoCond builder.Cond + var addedTableName = (len(session.statement.JoinStr) > 0) + var table = rows.session.statement.RefTable + + if !session.statement.NoAutoCondition { + var err error + autoCond, err = session.statement.BuildConds(table, bean, true, true, false, true, addedTableName) + if err != nil { + return nil, err + } + } else { + // !oinume! Add " IS NULL" to WHERE whatever condiBean is given. 
+ // See https://gitea.com/xorm/xorm/issues/179 + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + var colName = session.engine.Quote(col.Name) + if addedTableName { + var nm = session.statement.TableName() + if len(session.statement.TableAlias) > 0 { + nm = session.statement.TableAlias + } + colName = session.engine.Quote(nm) + "." + colName + } + + autoCond = session.statement.CondDeleted(col) + } + } + + sqlStr, args, err = rows.session.statement.GenFindSQL(autoCond) + if err != nil { + return nil, err + } + } else { + sqlStr = rows.session.statement.GenRawSQL() + args = rows.session.statement.RawParams + } + + rows.rows, err = rows.session.queryRows(sqlStr, args...) + if err != nil { + rows.lastError = err + rows.Close() + return nil, err + } + + return rows, nil +} + +// Next move cursor to next record, return false if end has reached +func (rows *Rows) Next() bool { + if rows.lastError == nil && rows.rows != nil { + hasNext := rows.rows.Next() + if !hasNext { + rows.lastError = sql.ErrNoRows + } + return hasNext + } + return false +} + +// Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close. +func (rows *Rows) Err() error { + return rows.lastError +} + +// Scan row record to bean properties +func (rows *Rows) Scan(bean interface{}) error { + if rows.lastError != nil { + return rows.lastError + } + + if reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType { + return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType) + } + + if err := rows.session.statement.SetRefBean(bean); err != nil { + return err + } + + fields, err := rows.rows.Columns() + if err != nil { + return err + } + + scanResults, err := rows.session.row2Slice(rows.rows, fields, bean) + if err != nil { + return err + } + + dataStruct := utils.ReflectValue(bean) + _, err = rows.session.slice2Bean(scanResults, fields, bean, &dataStruct, rows.session.statement.RefTable) + if err != nil { + return err + } + + return rows.session.executeProcessors() +} + +// Close session if session.IsAutoClose is true, and claimed any opened resources +func (rows *Rows) Close() error { + if rows.session.isAutoClose { + defer rows.session.Close() + } + + if rows.rows != nil { + return rows.rows.Close() + } + + return rows.lastError +} diff --git a/vendor/xorm.io/xorm/schemas/column.go b/vendor/xorm.io/xorm/schemas/column.go new file mode 100644 index 0000000000..db66a3a675 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/column.go @@ -0,0 +1,133 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package schemas
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	TWOSIDES = iota + 1
+	ONLYTODB
+	ONLYFROMDB
+)
+
+// Column defines database column
+type Column struct {
+	Name            string
+	TableName       string
+	FieldName       string // Available only when parsed from a struct
+	SQLType         SQLType
+	IsJSON          bool
+	Length          int
+	Length2         int
+	Nullable        bool
+	Default         string
+	Indexes         map[string]int
+	IsPrimaryKey    bool
+	IsAutoIncrement bool
+	MapType         int
+	IsCreated       bool
+	IsUpdated       bool
+	IsDeleted       bool
+	IsCascade       bool
+	IsVersion       bool
+	DefaultIsEmpty  bool // true means the column has no default set, not that the default value is empty
+	EnumOptions     map[string]int
+	SetOptions      map[string]int
+	DisableTimeZone bool
+	TimeZone        *time.Location // column specified time zone
+	Comment         string
+}
+
+// NewColumn creates a new column
+func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int, nullable bool) *Column {
+	return &Column{
+		Name:            name,
+		TableName:       "",
+		FieldName:       fieldName,
+		SQLType:         sqlType,
+		Length:          len1,
+		Length2:         len2,
+		Nullable:        nullable,
+		Default:         "",
+		Indexes:         make(map[string]int),
+		IsPrimaryKey:    false,
+		IsAutoIncrement: false,
+		MapType:         TWOSIDES,
+		IsCreated:       false,
+		IsUpdated:       false,
+		IsDeleted:       false,
+		IsCascade:       false,
+		IsVersion:       false,
+		DefaultIsEmpty:  true, // default should be no default
+		EnumOptions:     make(map[string]int),
+		Comment:         "",
+	}
+}
+
+// ValueOf returns the column's field of the struct's value
+func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) {
+	dataStruct := reflect.Indirect(reflect.ValueOf(bean))
+	return col.ValueOfV(&dataStruct)
+}
+
+// ValueOfV returns the column's field of the struct's value, accepting a reflect value
+func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) {
+	var fieldValue reflect.Value
+	fieldPath := strings.Split(col.FieldName, ".")
+
+	if dataStruct.Type().Kind() == reflect.Map {
+		keyValue := reflect.ValueOf(fieldPath[len(fieldPath)-1])
+		fieldValue = dataStruct.MapIndex(keyValue)
+		return &fieldValue, nil
+	} else if dataStruct.Type().Kind() == reflect.Interface {
+		structValue := reflect.ValueOf(dataStruct.Interface())
+		dataStruct = &structValue
+	}
+
+	level := len(fieldPath)
+	fieldValue = dataStruct.FieldByName(fieldPath[0])
+	for i := 0; i < level-1; i++ {
+		if !fieldValue.IsValid() {
+			break
+		}
+		if fieldValue.Kind() == reflect.Struct {
+			fieldValue = fieldValue.FieldByName(fieldPath[i+1])
+		} else if fieldValue.Kind() == reflect.Ptr {
+			if fieldValue.IsNil() {
+				fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
+			}
+			fieldValue = fieldValue.Elem().FieldByName(fieldPath[i+1])
+		} else {
+			return nil, fmt.Errorf("field %v is not valid", col.FieldName)
+		}
+	}
+
+	if !fieldValue.IsValid() {
+		return nil, fmt.Errorf("field %v is not valid", col.FieldName)
+	}
+
+	return &fieldValue, nil
+}
+
+// ConvertID converts id content to a suitable type according to the column type
+func (col *Column) ConvertID(sid string) (interface{}, error) {
+	if col.SQLType.IsNumeric() {
+		n, err := strconv.ParseInt(sid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		return n, nil
+	} else if col.SQLType.IsText() {
+		return sid, nil
+	}
+	return nil, errors.New("not supported")
+}
diff --git a/vendor/xorm.io/xorm/schemas/index.go b/vendor/xorm.io/xorm/schemas/index.go
new file mode 100644
index 0000000000..9541250f55
--- /dev/null
+++ b/vendor/xorm.io/xorm/schemas/index.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemas + +import ( + "fmt" + "strings" +) + +// enumerate all index types +const ( + IndexType = iota + 1 + UniqueType +) + +// Index represents a database index +type Index struct { + IsRegular bool + Name string + Type int + Cols []string +} + +// NewIndex new an index object +func NewIndex(name string, indexType int) *Index { + return &Index{true, name, indexType, make([]string, 0)} +} + +func (index *Index) XName(tableName string) string { + if !strings.HasPrefix(index.Name, "UQE_") && + !strings.HasPrefix(index.Name, "IDX_") { + tableParts := strings.Split(strings.Replace(tableName, `"`, "", -1), ".") + tableName = tableParts[len(tableParts)-1] + if index.Type == UniqueType { + return fmt.Sprintf("UQE_%v_%v", tableName, index.Name) + } + return fmt.Sprintf("IDX_%v_%v", tableName, index.Name) + } + return index.Name +} + +// AddColumn add columns which will be composite index +func (index *Index) AddColumn(cols ...string) { + for _, col := range cols { + index.Cols = append(index.Cols, col) + } +} + +func (index *Index) Equal(dst *Index) bool { + if index.Type != dst.Type { + return false + } + if len(index.Cols) != len(dst.Cols) { + return false + } + + for i := 0; i < len(index.Cols); i++ { + var found bool + for j := 0; j < len(dst.Cols); j++ { + if index.Cols[i] == dst.Cols[j] { + found = true + break + } + } + if !found { + return false + } + } + return true +} diff --git a/vendor/xorm.io/xorm/schemas/pk.go b/vendor/xorm.io/xorm/schemas/pk.go new file mode 100644 index 0000000000..03916b44fd --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/pk.go @@ -0,0 +1,41 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemas + +import ( + "bytes" + "encoding/gob" + + "xorm.io/xorm/internal/utils" +) + +type PK []interface{} + +func NewPK(pks ...interface{}) *PK { + p := PK(pks) + return &p +} + +func (p *PK) IsZero() bool { + for _, k := range *p { + if utils.IsZero(k) { + return true + } + } + return false +} + +func (p *PK) ToString() (string, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(*p) + return buf.String(), err +} + +func (p *PK) FromString(content string) error { + dec := gob.NewDecoder(bytes.NewBufferString(content)) + err := dec.Decode(p) + return err +} diff --git a/vendor/xorm.io/xorm/schemas/quote.go b/vendor/xorm.io/xorm/schemas/quote.go new file mode 100644 index 0000000000..c44abe250f --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/quote.go @@ -0,0 +1,240 @@ +// Copyright 2020 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package schemas
+
+import (
+	"strings"
+)
+
+// Quoter represents a quoter to the SQL table name and column name
+type Quoter struct {
+	Prefix     byte
+	Suffix     byte
+	IsReserved func(string) bool
+}
+
+var (
+	// AlwaysNoReserve always treats the word as not reserved
+	AlwaysNoReserve = func(string) bool { return false }
+
+	// AlwaysReserve always treats the word as reserved
+	AlwaysReserve = func(string) bool { return true }
+
+	// CommanQuoteMark represents the common quote mark
+	CommanQuoteMark byte = '`'
+
+	// CommonQuoter represents a common quoter
+	CommonQuoter = Quoter{CommanQuoteMark, CommanQuoteMark, AlwaysReserve}
+)
+
+func (q Quoter) IsEmpty() bool {
+	return q.Prefix == 0 && q.Suffix == 0
+}
+
+func (q Quoter) Quote(s string) string {
+	var buf strings.Builder
+	q.QuoteTo(&buf, s)
+	return buf.String()
+}
+
+// Trim removes quotes from s
+func (q Quoter) Trim(s string) string {
+	if len(s) < 2 {
+		return s
+	}
+
+	var buf strings.Builder
+	for i := 0; i < len(s); i++ {
+		switch {
+		case i == 0 && s[i] == q.Prefix:
+		case i == len(s)-1 && s[i] == q.Suffix:
+		case s[i] == q.Suffix && s[i+1] == '.':
+		case s[i] == q.Prefix && s[i-1] == '.':
+		default:
+			buf.WriteByte(s[i])
+		}
+	}
+	return buf.String()
+}
+
+func (q Quoter) Join(a []string, sep string) string {
+	var b strings.Builder
+	q.JoinWrite(&b, a, sep)
+	return b.String()
+}
+
+func (q Quoter) JoinWrite(b *strings.Builder, a []string, sep string) error {
+	if len(a) == 0 {
+		return nil
+	}
+
+	n := len(sep) * (len(a) - 1)
+	for i := 0; i < len(a); i++ {
+		n += len(a[i])
+	}
+
+	b.Grow(n)
+	for i, s := range a {
+		if i > 0 {
+			if _, err := b.WriteString(sep); err != nil {
+				return err
+			}
+		}
+		if s != "*" {
+			q.QuoteTo(b, strings.TrimSpace(s))
+		}
+	}
+	return nil
+}
+
+func findWord(v string, start int) int {
+	for j := start; j < len(v); j++ {
+		switch v[j] {
+		case '.', ' ':
+			return j
+		}
+	}
+	return len(v)
+}
+
+func findStart(value string, start int) int {
+	if value[start] == '.' {
+		return start + 1
+	}
+	if value[start] != ' ' {
+		return start
+	}
+
+	var k = -1
+	for j := start; j < len(value); j++ {
+		if value[j] != ' ' {
+			k = j
+			break
+		}
+	}
+	if k == -1 {
+		return len(value)
+	}
+
+	if (value[k] == 'A' || value[k] == 'a') && (value[k+1] == 'S' || value[k+1] == 's') {
+		k = k + 2
+	}
+
+	for j := k; j < len(value); j++ {
+		if value[j] != ' ' {
+			return j
+		}
+	}
+	return len(value)
+}
+
+func (q Quoter) quoteWordTo(buf *strings.Builder, word string) error {
+	var realWord = word
+	if (word[0] == CommanQuoteMark && word[len(word)-1] == CommanQuoteMark) ||
+		(word[0] == q.Prefix && word[len(word)-1] == q.Suffix) {
+		realWord = word[1 : len(word)-1]
+	}
+
+	if q.IsEmpty() {
+		_, err := buf.WriteString(realWord)
+		return err
+	}
+
+	isReserved := q.IsReserved(realWord)
+	if isReserved {
+		if err := buf.WriteByte(q.Prefix); err != nil {
+			return err
+		}
+	}
+	if _, err := buf.WriteString(realWord); err != nil {
+		return err
+	}
+	if isReserved {
+		return buf.WriteByte(q.Suffix)
+	}
+
+	return nil
+}
+
+// QuoteTo quotes the table or column names. i.e.
if the quotes are [ and ] +// name -> [name] +// `name` -> [name] +// [name] -> [name] +// schema.name -> [schema].[name] +// `schema`.`name` -> [schema].[name] +// `schema`.name -> [schema].[name] +// schema.`name` -> [schema].[name] +// [schema].name -> [schema].[name] +// schema.[name] -> [schema].[name] +// name AS a -> [name] AS a +// schema.name AS a -> [schema].[name] AS a +func (q Quoter) QuoteTo(buf *strings.Builder, value string) error { + var i int + for i < len(value) { + start := findStart(value, i) + if start > i { + if _, err := buf.WriteString(value[i:start]); err != nil { + return err + } + } + if start == len(value) { + return nil + } + + var nextEnd = findWord(value, start) + if err := q.quoteWordTo(buf, value[start:nextEnd]); err != nil { + return err + } + i = nextEnd + } + return nil +} + +// Strings quotes a slice of string +func (q Quoter) Strings(s []string) []string { + var res = make([]string, 0, len(s)) + for _, a := range s { + res = append(res, q.Quote(a)) + } + return res +} + +// Replace replaces common quote(`) as the quotes on the sql +func (q Quoter) Replace(sql string) string { + if q.IsEmpty() { + return sql + } + + var buf strings.Builder + buf.Grow(len(sql)) + + var beginSingleQuote bool + for i := 0; i < len(sql); i++ { + if !beginSingleQuote && sql[i] == CommanQuoteMark { + var j = i + 1 + for ; j < len(sql); j++ { + if sql[j] == CommanQuoteMark { + break + } + } + word := sql[i+1 : j] + isReserved := q.IsReserved(word) + if isReserved { + buf.WriteByte(q.Prefix) + } + buf.WriteString(word) + if isReserved { + buf.WriteByte(q.Suffix) + } + i = j + } else { + if sql[i] == '\'' { + beginSingleQuote = !beginSingleQuote + } + buf.WriteByte(sql[i]) + } + } + return buf.String() +} diff --git a/vendor/xorm.io/xorm/schemas/table.go b/vendor/xorm.io/xorm/schemas/table.go new file mode 100644 index 0000000000..6c57a7e382 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/table.go @@ -0,0 +1,195 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package schemas + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +// Table represents a database table +type Table struct { + Name string + Type reflect.Type + columnsSeq []string + columnsMap map[string][]*Column + columns []*Column + Indexes map[string]*Index + PrimaryKeys []string + AutoIncrement string + Created map[string]bool + Updated string + Deleted string + Version string + StoreEngine string + Charset string + Comment string +} + +// NewEmptyTable creates an empty table +func NewEmptyTable() *Table { + return NewTable("", nil) +} + +// NewTable creates a new Table object +func NewTable(name string, t reflect.Type) *Table { + return &Table{Name: name, Type: t, + columnsSeq: make([]string, 0), + columns: make([]*Column, 0), + columnsMap: make(map[string][]*Column), + Indexes: make(map[string]*Index), + Created: make(map[string]bool), + PrimaryKeys: make([]string, 0), + } +} + +// Columns returns table's columns +func (table *Table) Columns() []*Column { + return table.columns +} + +// ColumnsSeq returns table's column names according sequence +func (table *Table) ColumnsSeq() []string { + return table.columnsSeq +} + +func (table *Table) columnsByName(name string) []*Column { + for k, cols := range table.columnsMap { + if strings.EqualFold(k, name) { + return cols + } + } + return nil +} + +// GetColumn returns column according column name, if column not found, return nil +func (table *Table) GetColumn(name string) *Column { + cols := table.columnsByName(name) + if cols != nil { + return cols[0] + } + + return nil +} + +// GetColumnIdx returns column according name and idx +func (table *Table) GetColumnIdx(name string, idx int) *Column { + cols := table.columnsByName(name) + if cols != nil && idx < len(cols) { + return cols[idx] + } + + return nil +} + +// PKColumns reprents all primary key columns +func (table *Table) PKColumns() []*Column { + columns := make([]*Column, len(table.PrimaryKeys)) + for i, name := range table.PrimaryKeys { + columns[i] = table.GetColumn(name) + } + return columns +} + +func (table *Table) ColumnType(name string) reflect.Type { + t, _ := table.Type.FieldByName(name) + return t.Type +} + +func (table *Table) AutoIncrColumn() *Column { + return table.GetColumn(table.AutoIncrement) +} + +func (table *Table) VersionColumn() *Column { + return table.GetColumn(table.Version) +} + +func (table *Table) UpdatedColumn() *Column { + return table.GetColumn(table.Updated) +} + +func (table *Table) DeletedColumn() *Column { + return table.GetColumn(table.Deleted) +} + +// AddColumn adds a column to table +func (table *Table) AddColumn(col *Column) { + table.columnsSeq = append(table.columnsSeq, col.Name) + table.columns = append(table.columns, col) + colName := strings.ToLower(col.Name) + if c, ok := table.columnsMap[colName]; ok { + table.columnsMap[colName] = append(c, col) + } else { + table.columnsMap[colName] = []*Column{col} + } + + if col.IsPrimaryKey { + table.PrimaryKeys = append(table.PrimaryKeys, col.Name) + } + if col.IsAutoIncrement { + table.AutoIncrement = col.Name + } + if col.IsCreated { + table.Created[col.Name] = true + } + if col.IsUpdated { + table.Updated = col.Name + } + if col.IsDeleted { + table.Deleted = col.Name + } + if col.IsVersion { + table.Version = col.Name + } +} + +// AddIndex adds an index or an unique to table +func (table *Table) AddIndex(index *Index) { + table.Indexes[index.Name] = index +} + +// IDOfV get id from one value of struct +func (table *Table) IDOfV(rv reflect.Value) (PK, error) { + v := 
reflect.Indirect(rv) + pk := make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + var err error + + fieldName := col.FieldName + for { + parts := strings.SplitN(fieldName, ".", 2) + if len(parts) == 1 { + break + } + + v = v.FieldByName(parts[0]) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("Unsupported read value of column %s from field %s", col.Name, col.FieldName) + } + fieldName = parts[1] + } + + pkField := v.FieldByName(fieldName) + switch pkField.Kind() { + case reflect.String: + pk[i], err = col.ConvertID(pkField.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + pk[i], err = col.ConvertID(strconv.FormatInt(pkField.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // id of uint will be converted to int64 + pk[i], err = col.ConvertID(strconv.FormatUint(pkField.Uint(), 10)) + } + + if err != nil { + return nil, err + } + } + return PK(pk), nil +} diff --git a/vendor/xorm.io/xorm/schemas/type.go b/vendor/xorm.io/xorm/schemas/type.go new file mode 100644 index 0000000000..89459a4de1 --- /dev/null +++ b/vendor/xorm.io/xorm/schemas/type.go @@ -0,0 +1,336 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schemas + +import ( + "reflect" + "sort" + "strings" + "time" +) + +type DBType string + +const ( + POSTGRES DBType = "postgres" + SQLITE DBType = "sqlite3" + MYSQL DBType = "mysql" + MSSQL DBType = "mssql" + ORACLE DBType = "oracle" +) + +// SQLType represents SQL types +type SQLType struct { + Name string + DefaultLength int + DefaultLength2 int +} + +const ( + UNKNOW_TYPE = iota + TEXT_TYPE + BLOB_TYPE + TIME_TYPE + NUMERIC_TYPE + ARRAY_TYPE +) + +func (s *SQLType) IsType(st int) bool { + if t, ok := SqlTypes[s.Name]; ok && t == st { + return true + } + return false +} + +func (s *SQLType) IsText() bool { + return s.IsType(TEXT_TYPE) +} + +func (s *SQLType) IsBlob() bool { + return s.IsType(BLOB_TYPE) +} + +func (s *SQLType) IsTime() bool { + return s.IsType(TIME_TYPE) +} + +func (s *SQLType) IsNumeric() bool { + return s.IsType(NUMERIC_TYPE) +} + +func (s *SQLType) IsArray() bool { + return s.IsType(ARRAY_TYPE) +} + +func (s *SQLType) IsJson() bool { + return s.Name == Json || s.Name == Jsonb +} + +var ( + Bit = "BIT" + TinyInt = "TINYINT" + SmallInt = "SMALLINT" + MediumInt = "MEDIUMINT" + Int = "INT" + Integer = "INTEGER" + BigInt = "BIGINT" + + Enum = "ENUM" + Set = "SET" + + Char = "CHAR" + Varchar = "VARCHAR" + NChar = "NCHAR" + NVarchar = "NVARCHAR" + TinyText = "TINYTEXT" + Text = "TEXT" + NText = "NTEXT" + Clob = "CLOB" + MediumText = "MEDIUMTEXT" + LongText = "LONGTEXT" + Uuid = "UUID" + UniqueIdentifier = "UNIQUEIDENTIFIER" + SysName = "SYSNAME" + + Date = "DATE" + DateTime = "DATETIME" + SmallDateTime = "SMALLDATETIME" + Time = "TIME" + TimeStamp = "TIMESTAMP" + TimeStampz = "TIMESTAMPZ" + Year = "YEAR" + + Decimal = "DECIMAL" + Numeric = "NUMERIC" + Money = "MONEY" + SmallMoney = "SMALLMONEY" + + Real = "REAL" + Float = "FLOAT" + Double = "DOUBLE" + + Binary = "BINARY" + VarBinary = "VARBINARY" + TinyBlob = "TINYBLOB" + Blob = "BLOB" + MediumBlob = "MEDIUMBLOB" + LongBlob = "LONGBLOB" + Bytea = "BYTEA" + + Bool = "BOOL" + Boolean = "BOOLEAN" + + Serial = "SERIAL" + BigSerial = "BIGSERIAL" + + Json = "JSON" + Jsonb = "JSONB" + + Array = "ARRAY" + + SqlTypes = 
map[string]int{ + Bit: NUMERIC_TYPE, + TinyInt: NUMERIC_TYPE, + SmallInt: NUMERIC_TYPE, + MediumInt: NUMERIC_TYPE, + Int: NUMERIC_TYPE, + Integer: NUMERIC_TYPE, + BigInt: NUMERIC_TYPE, + + Enum: TEXT_TYPE, + Set: TEXT_TYPE, + Json: TEXT_TYPE, + Jsonb: TEXT_TYPE, + + Char: TEXT_TYPE, + NChar: TEXT_TYPE, + Varchar: TEXT_TYPE, + NVarchar: TEXT_TYPE, + TinyText: TEXT_TYPE, + Text: TEXT_TYPE, + NText: TEXT_TYPE, + MediumText: TEXT_TYPE, + LongText: TEXT_TYPE, + Uuid: TEXT_TYPE, + Clob: TEXT_TYPE, + SysName: TEXT_TYPE, + + Date: TIME_TYPE, + DateTime: TIME_TYPE, + Time: TIME_TYPE, + TimeStamp: TIME_TYPE, + TimeStampz: TIME_TYPE, + SmallDateTime: TIME_TYPE, + Year: TIME_TYPE, + + Decimal: NUMERIC_TYPE, + Numeric: NUMERIC_TYPE, + Real: NUMERIC_TYPE, + Float: NUMERIC_TYPE, + Double: NUMERIC_TYPE, + Money: NUMERIC_TYPE, + SmallMoney: NUMERIC_TYPE, + + Binary: BLOB_TYPE, + VarBinary: BLOB_TYPE, + + TinyBlob: BLOB_TYPE, + Blob: BLOB_TYPE, + MediumBlob: BLOB_TYPE, + LongBlob: BLOB_TYPE, + Bytea: BLOB_TYPE, + UniqueIdentifier: BLOB_TYPE, + + Bool: NUMERIC_TYPE, + + Serial: NUMERIC_TYPE, + BigSerial: NUMERIC_TYPE, + + Array: ARRAY_TYPE, + } + + intTypes = sort.StringSlice{"*int", "*int16", "*int32", "*int8"} + uintTypes = sort.StringSlice{"*uint", "*uint16", "*uint32", "*uint8"} +) + +// !nashtsai! treat following var as interal const values, these are used for reflect.TypeOf comparison +var ( + c_EMPTY_STRING string + c_BOOL_DEFAULT bool + c_BYTE_DEFAULT byte + c_COMPLEX64_DEFAULT complex64 + c_COMPLEX128_DEFAULT complex128 + c_FLOAT32_DEFAULT float32 + c_FLOAT64_DEFAULT float64 + c_INT64_DEFAULT int64 + c_UINT64_DEFAULT uint64 + c_INT32_DEFAULT int32 + c_UINT32_DEFAULT uint32 + c_INT16_DEFAULT int16 + c_UINT16_DEFAULT uint16 + c_INT8_DEFAULT int8 + c_UINT8_DEFAULT uint8 + c_INT_DEFAULT int + c_UINT_DEFAULT uint + c_TIME_DEFAULT time.Time +) + +var ( + IntType = reflect.TypeOf(c_INT_DEFAULT) + Int8Type = reflect.TypeOf(c_INT8_DEFAULT) + Int16Type = reflect.TypeOf(c_INT16_DEFAULT) + Int32Type = reflect.TypeOf(c_INT32_DEFAULT) + Int64Type = reflect.TypeOf(c_INT64_DEFAULT) + + UintType = reflect.TypeOf(c_UINT_DEFAULT) + Uint8Type = reflect.TypeOf(c_UINT8_DEFAULT) + Uint16Type = reflect.TypeOf(c_UINT16_DEFAULT) + Uint32Type = reflect.TypeOf(c_UINT32_DEFAULT) + Uint64Type = reflect.TypeOf(c_UINT64_DEFAULT) + + Float32Type = reflect.TypeOf(c_FLOAT32_DEFAULT) + Float64Type = reflect.TypeOf(c_FLOAT64_DEFAULT) + + Complex64Type = reflect.TypeOf(c_COMPLEX64_DEFAULT) + Complex128Type = reflect.TypeOf(c_COMPLEX128_DEFAULT) + + StringType = reflect.TypeOf(c_EMPTY_STRING) + BoolType = reflect.TypeOf(c_BOOL_DEFAULT) + ByteType = reflect.TypeOf(c_BYTE_DEFAULT) + BytesType = reflect.SliceOf(ByteType) + + TimeType = reflect.TypeOf(c_TIME_DEFAULT) +) + +var ( + PtrIntType = reflect.PtrTo(IntType) + PtrInt8Type = reflect.PtrTo(Int8Type) + PtrInt16Type = reflect.PtrTo(Int16Type) + PtrInt32Type = reflect.PtrTo(Int32Type) + PtrInt64Type = reflect.PtrTo(Int64Type) + + PtrUintType = reflect.PtrTo(UintType) + PtrUint8Type = reflect.PtrTo(Uint8Type) + PtrUint16Type = reflect.PtrTo(Uint16Type) + PtrUint32Type = reflect.PtrTo(Uint32Type) + PtrUint64Type = reflect.PtrTo(Uint64Type) + + PtrFloat32Type = reflect.PtrTo(Float32Type) + PtrFloat64Type = reflect.PtrTo(Float64Type) + + PtrComplex64Type = reflect.PtrTo(Complex64Type) + PtrComplex128Type = reflect.PtrTo(Complex128Type) + + PtrStringType = reflect.PtrTo(StringType) + PtrBoolType = reflect.PtrTo(BoolType) + PtrByteType = reflect.PtrTo(ByteType) + + PtrTimeType = 
reflect.PtrTo(TimeType) +) + +// Type2SQLType generate SQLType acorrding Go's type +func Type2SQLType(t reflect.Type) (st SQLType) { + switch k := t.Kind(); k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + st = SQLType{Int, 0, 0} + case reflect.Int64, reflect.Uint64: + st = SQLType{BigInt, 0, 0} + case reflect.Float32: + st = SQLType{Float, 0, 0} + case reflect.Float64: + st = SQLType{Double, 0, 0} + case reflect.Complex64, reflect.Complex128: + st = SQLType{Varchar, 64, 0} + case reflect.Array, reflect.Slice, reflect.Map: + if t.Elem() == reflect.TypeOf(c_BYTE_DEFAULT) { + st = SQLType{Blob, 0, 0} + } else { + st = SQLType{Text, 0, 0} + } + case reflect.Bool: + st = SQLType{Bool, 0, 0} + case reflect.String: + st = SQLType{Varchar, 255, 0} + case reflect.Struct: + if t.ConvertibleTo(TimeType) { + st = SQLType{DateTime, 0, 0} + } else { + // TODO need to handle association struct + st = SQLType{Text, 0, 0} + } + case reflect.Ptr: + st = Type2SQLType(t.Elem()) + default: + st = SQLType{Text, 0, 0} + } + return +} + +// default sql type change to go types +func SQLType2Type(st SQLType) reflect.Type { + name := strings.ToUpper(st.Name) + switch name { + case Bit, TinyInt, SmallInt, MediumInt, Int, Integer, Serial: + return reflect.TypeOf(1) + case BigInt, BigSerial: + return reflect.TypeOf(int64(1)) + case Float, Real: + return reflect.TypeOf(float32(1)) + case Double: + return reflect.TypeOf(float64(1)) + case Char, NChar, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName: + return reflect.TypeOf("") + case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier: + return reflect.TypeOf([]byte{}) + case Bool: + return reflect.TypeOf(true) + case DateTime, Date, Time, TimeStamp, TimeStampz, SmallDateTime, Year: + return reflect.TypeOf(c_TIME_DEFAULT) + case Decimal, Numeric, Money, SmallMoney: + return reflect.TypeOf("") + default: + return reflect.TypeOf("") + } +} diff --git a/vendor/xorm.io/xorm/session.go b/vendor/xorm.io/xorm/session.go new file mode 100644 index 0000000000..761b14152f --- /dev/null +++ b/vendor/xorm.io/xorm/session.go @@ -0,0 +1,904 @@ +// Copyright 2015 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "database/sql" + "encoding/hex" + "errors" + "fmt" + "hash/crc32" + "io" + "reflect" + "strings" + "time" + + "xorm.io/xorm/contexts" + "xorm.io/xorm/convert" + "xorm.io/xorm/core" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/statements" + "xorm.io/xorm/log" + "xorm.io/xorm/schemas" +) + +// ErrFieldIsNotExist columns does not exist +type ErrFieldIsNotExist struct { + FieldName string + TableName string +} + +func (e ErrFieldIsNotExist) Error() string { + return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) +} + +// ErrFieldIsNotValid is not valid +type ErrFieldIsNotValid struct { + FieldName string + TableName string +} + +func (e ErrFieldIsNotValid) Error() string { + return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName) +} + +type sessionType bool + +const ( + engineSession sessionType = false + groupSession sessionType = true +) + +// Session keep a pointer to sql.DB and provides all execution of all +// kind of database operations. 
+type Session struct { + engine *Engine + tx *core.Tx + statement *statements.Statement + isAutoCommit bool + isCommitedOrRollbacked bool + isAutoClose bool + isClosed bool + prepareStmt bool + // Automatically reset the statement after operations that execute a SQL + // query such as Count(), Find(), Get(), ... + autoResetStatement bool + + // !nashtsai! storing these beans due to yet committed tx + afterInsertBeans map[interface{}]*[]func(interface{}) + afterUpdateBeans map[interface{}]*[]func(interface{}) + afterDeleteBeans map[interface{}]*[]func(interface{}) + // -- + + beforeClosures []func(interface{}) + afterClosures []func(interface{}) + afterProcessors []executedProcessor + + stmtCache map[uint32]*core.Stmt //key: hash.Hash32 of (queryStr, len(queryStr)) + + lastSQL string + lastSQLArgs []interface{} + + ctx context.Context + sessionType sessionType +} + +func newSessionID() string { + hash := sha256.New() + _, err := io.CopyN(hash, rand.Reader, 50) + if err != nil { + return "????????????????????" + } + md := hash.Sum(nil) + mdStr := hex.EncodeToString(md) + return mdStr[0:20] +} + +func newSession(engine *Engine) *Session { + var ctx context.Context + if engine.logSessionID { + ctx = context.WithValue(engine.defaultContext, log.SessionIDKey, newSessionID()) + } else { + ctx = engine.defaultContext + } + + return &Session{ + ctx: ctx, + engine: engine, + tx: nil, + statement: statements.NewStatement( + engine.dialect, + engine.tagParser, + engine.DatabaseTZ, + ), + isClosed: false, + isAutoCommit: true, + isCommitedOrRollbacked: false, + isAutoClose: false, + autoResetStatement: true, + prepareStmt: false, + + afterInsertBeans: make(map[interface{}]*[]func(interface{}), 0), + afterUpdateBeans: make(map[interface{}]*[]func(interface{}), 0), + afterDeleteBeans: make(map[interface{}]*[]func(interface{}), 0), + beforeClosures: make([]func(interface{}), 0), + afterClosures: make([]func(interface{}), 0), + afterProcessors: make([]executedProcessor, 0), + stmtCache: make(map[uint32]*core.Stmt), + + lastSQL: "", + lastSQLArgs: make([]interface{}, 0), + + sessionType: engineSession, + } +} + +// Close release the connection from pool +func (session *Session) Close() error { + for _, v := range session.stmtCache { + if err := v.Close(); err != nil { + return err + } + } + + if !session.isClosed { + // When Close be called, if session is a transaction and do not call + // Commit or Rollback, then call Rollback. 
+ if session.tx != nil && !session.isCommitedOrRollbacked { + if err := session.Rollback(); err != nil { + return err + } + } + session.tx = nil + session.stmtCache = nil + session.isClosed = true + } + return nil +} + +func (session *Session) db() *core.DB { + return session.engine.db +} + +func (session *Session) getQueryer() core.Queryer { + if session.tx != nil { + return session.tx + } + return session.db() +} + +// ContextCache enable context cache or not +func (session *Session) ContextCache(context contexts.ContextCache) *Session { + session.statement.SetContextCache(context) + return session +} + +// IsClosed returns if session is closed +func (session *Session) IsClosed() bool { + return session.isClosed +} + +func (session *Session) resetStatement() { + if session.autoResetStatement { + session.statement.Reset() + } +} + +// Prepare set a flag to session that should be prepare statement before execute query +func (session *Session) Prepare() *Session { + session.prepareStmt = true + return session +} + +// Before Apply before Processor, affected bean is passed to closure arg +func (session *Session) Before(closures func(interface{})) *Session { + if closures != nil { + session.beforeClosures = append(session.beforeClosures, closures) + } + return session +} + +// After Apply after Processor, affected bean is passed to closure arg +func (session *Session) After(closures func(interface{})) *Session { + if closures != nil { + session.afterClosures = append(session.afterClosures, closures) + } + return session +} + +// Table can input a string or pointer to struct for special a table to operate. +func (session *Session) Table(tableNameOrBean interface{}) *Session { + if err := session.statement.SetTable(tableNameOrBean); err != nil { + session.statement.LastError = err + } + return session +} + +// Alias set the table alias +func (session *Session) Alias(alias string) *Session { + session.statement.Alias(alias) + return session +} + +// NoCascade indicate that no cascade load child object +func (session *Session) NoCascade() *Session { + session.statement.UseCascade = false + return session +} + +// ForUpdate Set Read/Write locking for UPDATE +func (session *Session) ForUpdate() *Session { + session.statement.IsForUpdate = true + return session +} + +// NoAutoCondition disable generate SQL condition from beans +func (session *Session) NoAutoCondition(no ...bool) *Session { + session.statement.SetNoAutoCondition(no...) + return session +} + +// Limit provide limit and offset query condition +func (session *Session) Limit(limit int, start ...int) *Session { + session.statement.Limit(limit, start...) + return session +} + +// OrderBy provide order by query condition, the input parameter is the content +// after order by on a sql statement. +func (session *Session) OrderBy(order string) *Session { + session.statement.OrderBy(order) + return session +} + +// Desc provide desc order by query condition, the input parameters are columns. +func (session *Session) Desc(colNames ...string) *Session { + session.statement.Desc(colNames...) + return session +} + +// Asc provide asc order by query condition, the input parameters are columns. +func (session *Session) Asc(colNames ...string) *Session { + session.statement.Asc(colNames...) 
+ return session +} + +// StoreEngine is only avialble mysql dialect currently +func (session *Session) StoreEngine(storeEngine string) *Session { + session.statement.StoreEngine = storeEngine + return session +} + +// Charset is only avialble mysql dialect currently +func (session *Session) Charset(charset string) *Session { + session.statement.Charset = charset + return session +} + +// Cascade indicates if loading sub Struct +func (session *Session) Cascade(trueOrFalse ...bool) *Session { + if len(trueOrFalse) >= 1 { + session.statement.UseCascade = trueOrFalse[0] + } + return session +} + +// MustLogSQL means record SQL or not and don't follow engine's setting +func (session *Session) MustLogSQL(logs ...bool) *Session { + var showSQL = true + if len(logs) > 0 { + showSQL = logs[0] + } + session.ctx = context.WithValue(session.ctx, log.SessionShowSQLKey, showSQL) + return session +} + +// NoCache ask this session do not retrieve data from cache system and +// get data from database directly. +func (session *Session) NoCache() *Session { + session.statement.UseCache = false + return session +} + +// Join join_operator should be one of INNER, LEFT OUTER, CROSS etc - this will be prepended to JOIN +func (session *Session) Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session { + session.statement.Join(joinOperator, tablename, condition, args...) + return session +} + +// GroupBy Generate Group By statement +func (session *Session) GroupBy(keys string) *Session { + session.statement.GroupBy(keys) + return session +} + +// Having Generate Having statement +func (session *Session) Having(conditions string) *Session { + session.statement.Having(conditions) + return session +} + +// DB db return the wrapper of sql.DB +func (session *Session) DB() *core.DB { + return session.db() +} + +func (session *Session) canCache() bool { + if session.statement.RefTable == nil || + session.statement.JoinStr != "" || + session.statement.RawSQL != "" || + !session.statement.UseCache || + session.statement.IsForUpdate || + session.tx != nil || + len(session.statement.SelectStr) > 0 { + return false + } + return true +} + +func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) { + crc := crc32.ChecksumIEEE([]byte(sqlStr)) + // TODO try hash(sqlStr+len(sqlStr)) + var has bool + stmt, has = session.stmtCache[crc] + if !has { + stmt, err = db.PrepareContext(session.ctx, sqlStr) + if err != nil { + return nil, err + } + session.stmtCache[crc] = stmt + } + return +} + +func (session *Session) getField(dataStruct *reflect.Value, key string, table *schemas.Table, idx int) (*reflect.Value, error) { + var col *schemas.Column + if col = table.GetColumnIdx(key, idx); col == nil { + return nil, ErrFieldIsNotExist{key, table.Name} + } + + fieldValue, err := col.ValueOfV(dataStruct) + if err != nil { + return nil, err + } + + if !fieldValue.IsValid() || !fieldValue.CanSet() { + return nil, ErrFieldIsNotValid{key, table.Name} + } + + return fieldValue, nil +} + +// Cell cell is a result of one column field +type Cell *interface{} + +func (session *Session) rows2Beans(rows *core.Rows, fields []string, + table *schemas.Table, newElemFunc func([]string) reflect.Value, + sliceValueSetFunc func(*reflect.Value, schemas.PK) error) error { + for rows.Next() { + var newValue = newElemFunc(fields) + bean := newValue.Interface() + dataStruct := newValue.Elem() + + // handle beforeClosures + scanResults, err := session.row2Slice(rows, fields, bean) + if err != 
nil { + return err + } + pk, err := session.slice2Bean(scanResults, fields, bean, &dataStruct, table) + if err != nil { + return err + } + session.afterProcessors = append(session.afterProcessors, executedProcessor{ + fun: func(*Session, interface{}) error { + return sliceValueSetFunc(&newValue, pk) + }, + session: session, + bean: bean, + }) + } + return nil +} + +func (session *Session) row2Slice(rows *core.Rows, fields []string, bean interface{}) ([]interface{}, error) { + for _, closure := range session.beforeClosures { + closure(bean) + } + + scanResults := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var cell interface{} + scanResults[i] = &cell + } + if err := rows.Scan(scanResults...); err != nil { + return nil, err + } + + executeBeforeSet(bean, fields, scanResults) + + return scanResults, nil +} + +func (session *Session) slice2Bean(scanResults []interface{}, fields []string, bean interface{}, dataStruct *reflect.Value, table *schemas.Table) (schemas.PK, error) { + defer func() { + executeAfterSet(bean, fields, scanResults) + }() + + buildAfterProcessors(session, bean) + + var tempMap = make(map[string]int) + var pk schemas.PK + for ii, key := range fields { + var idx int + var ok bool + var lKey = strings.ToLower(key) + if idx, ok = tempMap[lKey]; !ok { + idx = 0 + } else { + idx = idx + 1 + } + tempMap[lKey] = idx + + fieldValue, err := session.getField(dataStruct, key, table, idx) + if err != nil { + if !strings.Contains(err.Error(), "is not valid") { + session.engine.logger.Warnf("%v", err) + } + continue + } + if fieldValue == nil { + continue + } + rawValue := reflect.Indirect(reflect.ValueOf(scanResults[ii])) + + // if row is null then ignore + if rawValue.Interface() == nil { + continue + } + + if fieldValue.CanAddr() { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + if data, err := value2Bytes(&rawValue); err == nil { + if err := structConvert.FromDB(data); err != nil { + return nil, err + } + } else { + return nil, err + } + continue + } + } + + if _, ok := fieldValue.Interface().(convert.Conversion); ok { + if data, err := value2Bytes(&rawValue); err == nil { + if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { + fieldValue.Set(reflect.New(fieldValue.Type().Elem())) + } + fieldValue.Interface().(convert.Conversion).FromDB(data) + } else { + return nil, err + } + continue + } + + rawValueType := reflect.TypeOf(rawValue.Interface()) + vv := reflect.ValueOf(rawValue.Interface()) + col := table.GetColumnIdx(key, idx) + if col.IsPrimaryKey { + pk = append(pk, rawValue.Interface()) + } + fieldType := fieldValue.Type() + hasAssigned := false + + if col.SQLType.IsJson() { + var bs []byte + if rawValueType.Kind() == reflect.String { + bs = []byte(vv.String()) + } else if rawValueType.ConvertibleTo(schemas.BytesType) { + bs = vv.Bytes() + } else { + return nil, fmt.Errorf("unsupported database data type: %s %v", key, rawValueType.Kind()) + } + + hasAssigned = true + + if len(bs) > 0 { + if fieldType.Kind() == reflect.String { + fieldValue.SetString(string(bs)) + continue + } + if fieldValue.CanAddr() { + err := json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) + if err != nil { + return nil, err + } + } else { + x := reflect.New(fieldType) + err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } + + continue + } + + switch fieldType.Kind() { + case reflect.Complex64, reflect.Complex128: + // TODO: reimplement 
this + var bs []byte + if rawValueType.Kind() == reflect.String { + bs = []byte(vv.String()) + } else if rawValueType.ConvertibleTo(schemas.BytesType) { + bs = vv.Bytes() + } + + hasAssigned = true + if len(bs) > 0 { + if fieldValue.CanAddr() { + err := json.DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) + if err != nil { + return nil, err + } + } else { + x := reflect.New(fieldType) + err := json.DefaultJSONHandler.Unmarshal(bs, x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } + case reflect.Slice, reflect.Array: + switch rawValueType.Kind() { + case reflect.Slice, reflect.Array: + switch rawValueType.Elem().Kind() { + case reflect.Uint8: + if fieldType.Elem().Kind() == reflect.Uint8 { + hasAssigned = true + if col.SQLType.IsText() { + x := reflect.New(fieldType) + err := json.DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } else { + if fieldValue.Len() > 0 { + for i := 0; i < fieldValue.Len(); i++ { + if i < vv.Len() { + fieldValue.Index(i).Set(vv.Index(i)) + } + } + } else { + for i := 0; i < vv.Len(); i++ { + fieldValue.Set(reflect.Append(*fieldValue, vv.Index(i))) + } + } + } + } + } + } + case reflect.String: + if rawValueType.Kind() == reflect.String { + hasAssigned = true + fieldValue.SetString(vv.String()) + } + case reflect.Bool: + if rawValueType.Kind() == reflect.Bool { + hasAssigned = true + fieldValue.SetBool(vv.Bool()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch rawValueType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + hasAssigned = true + fieldValue.SetInt(vv.Int()) + } + case reflect.Float32, reflect.Float64: + switch rawValueType.Kind() { + case reflect.Float32, reflect.Float64: + hasAssigned = true + fieldValue.SetFloat(vv.Float()) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + switch rawValueType.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + hasAssigned = true + fieldValue.SetUint(vv.Uint()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + hasAssigned = true + fieldValue.SetUint(uint64(vv.Int())) + } + case reflect.Struct: + if fieldType.ConvertibleTo(schemas.TimeType) { + dbTZ := session.engine.DatabaseTZ + if col.TimeZone != nil { + dbTZ = col.TimeZone + } + + if rawValueType == schemas.TimeType { + hasAssigned = true + + t := vv.Convert(schemas.TimeType).Interface().(time.Time) + + z, _ := t.Zone() + // set new location if database don't save timezone or give an incorrect timezone + if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbTZ.String() { // !nashtsai! 
HACK tmp work around for lib/pq doesn't properly time with location + session.engine.logger.Debugf("empty zone key[%v] : %v | zone: %v | location: %+v\n", key, t, z, *t.Location()) + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), + t.Minute(), t.Second(), t.Nanosecond(), dbTZ) + } + + t = t.In(session.engine.TZLocation) + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } else if rawValueType == schemas.IntType || rawValueType == schemas.Int64Type || + rawValueType == schemas.Int32Type { + hasAssigned = true + + t := time.Unix(vv.Int(), 0).In(session.engine.TZLocation) + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } else { + if d, ok := vv.Interface().([]uint8); ok { + hasAssigned = true + t, err := session.byte2Time(col, d) + if err != nil { + session.engine.logger.Errorf("byte2Time error: %v", err) + hasAssigned = false + } else { + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } + } else if d, ok := vv.Interface().(string); ok { + hasAssigned = true + t, err := session.str2Time(col, d) + if err != nil { + session.engine.logger.Errorf("byte2Time error: %v", err) + hasAssigned = false + } else { + fieldValue.Set(reflect.ValueOf(t).Convert(fieldType)) + } + } else { + return nil, fmt.Errorf("rawValueType is %v, value is %v", rawValueType, vv.Interface()) + } + } + } else if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { + // !! 增加支持sql.Scanner接口的结构,如sql.NullString + hasAssigned = true + if err := nulVal.Scan(vv.Interface()); err != nil { + session.engine.logger.Errorf("sql.Sanner error: %v", err) + hasAssigned = false + } + } else if col.SQLType.IsJson() { + if rawValueType.Kind() == reflect.String { + hasAssigned = true + x := reflect.New(fieldType) + if len([]byte(vv.String())) > 0 { + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } else if rawValueType.Kind() == reflect.Slice { + hasAssigned = true + x := reflect.New(fieldType) + if len(vv.Bytes()) > 0 { + err := json.DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) + if err != nil { + return nil, err + } + fieldValue.Set(x.Elem()) + } + } + } else if session.statement.UseCascade { + table, err := session.engine.tagParser.ParseWithCache(*fieldValue) + if err != nil { + return nil, err + } + + hasAssigned = true + if len(table.PrimaryKeys) != 1 { + return nil, errors.New("unsupported non or composited primary key cascade") + } + var pk = make(schemas.PK, len(table.PrimaryKeys)) + pk[0], err = asKind(vv, rawValueType) + if err != nil { + return nil, err + } + + if !pk.IsZero() { + // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch + // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne + // property to be fetched lazily + structInter := reflect.New(fieldValue.Type()) + has, err := session.ID(pk).NoCascade().get(structInter.Interface()) + if err != nil { + return nil, err + } + if has { + fieldValue.Set(structInter.Elem()) + } else { + return nil, errors.New("cascade obj is not exist") + } + } + } + case reflect.Ptr: + // !nashtsai! 
TODO merge duplicated codes above + switch fieldType { + // following types case matching ptr's native type, therefore assign ptr directly + case schemas.PtrStringType: + if rawValueType.Kind() == reflect.String { + x := vv.String() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrBoolType: + if rawValueType.Kind() == reflect.Bool { + x := vv.Bool() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrTimeType: + if rawValueType == schemas.PtrTimeType { + hasAssigned = true + var x = rawValue.Interface().(time.Time) + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrFloat64Type: + if rawValueType.Kind() == reflect.Float64 { + x := vv.Float() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrUint64Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint64(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt64Type: + if rawValueType.Kind() == reflect.Int64 { + x := vv.Int() + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrFloat32Type: + if rawValueType.Kind() == reflect.Float64 { + var x = float32(vv.Float()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrIntType: + if rawValueType.Kind() == reflect.Int64 { + var x = int(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt32Type: + if rawValueType.Kind() == reflect.Int64 { + var x = int32(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt8Type: + if rawValueType.Kind() == reflect.Int64 { + var x = int8(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrInt16Type: + if rawValueType.Kind() == reflect.Int64 { + var x = int16(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrUintType: + if rawValueType.Kind() == reflect.Int64 { + var x = uint(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.PtrUint32Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint32(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.Uint8Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint8(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.Uint16Type: + if rawValueType.Kind() == reflect.Int64 { + var x = uint16(vv.Int()) + hasAssigned = true + fieldValue.Set(reflect.ValueOf(&x)) + } + case schemas.Complex64Type: + var x complex64 + if len([]byte(vv.String())) > 0 { + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) + if err != nil { + return nil, err + } + fieldValue.Set(reflect.ValueOf(&x)) + } + hasAssigned = true + case schemas.Complex128Type: + var x complex128 + if len([]byte(vv.String())) > 0 { + err := json.DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) + if err != nil { + return nil, err + } + fieldValue.Set(reflect.ValueOf(&x)) + } + hasAssigned = true + } // switch fieldType + } // switch fieldType.Kind() + + // !nashtsai! 
for value can't be assigned directly fallback to convert to []byte then back to value + if !hasAssigned { + data, err := value2Bytes(&rawValue) + if err != nil { + return nil, err + } + + if err = session.bytes2Value(col, fieldValue, data); err != nil { + return nil, err + } + } + } + return pk, nil +} + +// saveLastSQL stores executed query information +func (session *Session) saveLastSQL(sql string, args ...interface{}) { + session.lastSQL = sql + session.lastSQLArgs = args +} + +// LastSQL returns last query information +func (session *Session) LastSQL() (string, []interface{}) { + return session.lastSQL, session.lastSQLArgs +} + +// Unscoped always disable struct tag "deleted" +func (session *Session) Unscoped() *Session { + session.statement.SetUnscoped() + return session +} + +func (session *Session) incrVersionFieldValue(fieldValue *reflect.Value) { + switch fieldValue.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fieldValue.SetInt(fieldValue.Int() + 1) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fieldValue.SetUint(fieldValue.Uint() + 1) + } +} + +// ContextHook sets the context on this session +func (session *Session) Context(ctx context.Context) *Session { + session.ctx = ctx + return session +} + +// PingContext test if database is ok +func (session *Session) PingContext(ctx context.Context) error { + if session.isAutoClose { + defer session.Close() + } + + session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) + return session.DB().PingContext(ctx) +} diff --git a/vendor/xorm.io/xorm/session_cols.go b/vendor/xorm.io/xorm/session_cols.go new file mode 100644 index 0000000000..ca3589abad --- /dev/null +++ b/vendor/xorm.io/xorm/session_cols.go @@ -0,0 +1,143 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "reflect" + "strings" + "time" + + "xorm.io/xorm/schemas" +) + +func setColumnInt(bean interface{}, col *schemas.Column, t int64) { + v, err := col.ValueOf(bean) + if err != nil { + return + } + if v.CanSet() { + switch v.Type().Kind() { + case reflect.Int, reflect.Int64, reflect.Int32: + v.SetInt(t) + case reflect.Uint, reflect.Uint64, reflect.Uint32: + v.SetUint(uint64(t)) + } + } +} + +func setColumnTime(bean interface{}, col *schemas.Column, t time.Time) { + v, err := col.ValueOf(bean) + if err != nil { + return + } + if v.CanSet() { + switch v.Type().Kind() { + case reflect.Struct: + v.Set(reflect.ValueOf(t).Convert(v.Type())) + case reflect.Int, reflect.Int64, reflect.Int32: + v.SetInt(t.Unix()) + case reflect.Uint, reflect.Uint64, reflect.Uint32: + v.SetUint(uint64(t.Unix())) + } + } +} + +func getFlagForColumn(m map[string]bool, col *schemas.Column) (val bool, has bool) { + if len(m) == 0 { + return false, false + } + + n := len(col.Name) + + for mk := range m { + if len(mk) != n { + continue + } + if strings.EqualFold(mk, col.Name) { + return m[mk], true + } + } + + return false, false +} + +// Incr provides a query string like "count = count + 1" +func (session *Session) Incr(column string, arg ...interface{}) *Session { + session.statement.Incr(column, arg...) + return session +} + +// Decr provides a query string like "count = count - 1" +func (session *Session) Decr(column string, arg ...interface{}) *Session { + session.statement.Decr(column, arg...) 
+ return session +} + +// SetExpr provides a query string like "column = {expression}" +func (session *Session) SetExpr(column string, expression interface{}) *Session { + session.statement.SetExpr(column, expression) + return session +} + +// Select provides some columns to special +func (session *Session) Select(str string) *Session { + session.statement.Select(str) + return session +} + +// Cols provides some columns to special +func (session *Session) Cols(columns ...string) *Session { + session.statement.Cols(columns...) + return session +} + +// AllCols ask all columns +func (session *Session) AllCols() *Session { + session.statement.AllCols() + return session +} + +// MustCols specify some columns must use even if they are empty +func (session *Session) MustCols(columns ...string) *Session { + session.statement.MustCols(columns...) + return session +} + +// UseBool automatically retrieve condition according struct, but +// if struct has bool field, it will ignore them. So use UseBool +// to tell system to do not ignore them. +// If no parameters, it will use all the bool field of struct, or +// it will use parameters's columns +func (session *Session) UseBool(columns ...string) *Session { + session.statement.UseBool(columns...) + return session +} + +// Distinct use for distinct columns. Caution: when you are using cache, +// distinct will not be cached because cache system need id, +// but distinct will not provide id +func (session *Session) Distinct(columns ...string) *Session { + session.statement.Distinct(columns...) + return session +} + +// Omit Only not use the parameters as select or update columns +func (session *Session) Omit(columns ...string) *Session { + session.statement.Omit(columns...) + return session +} + +// Nullable Set null when column is zero-value and nullable for update +func (session *Session) Nullable(columns ...string) *Session { + session.statement.Nullable(columns...) + return session +} + +// NoAutoTime means do not automatically give created field and updated field +// the current time on the current session temporarily +func (session *Session) NoAutoTime() *Session { + session.statement.UseAutoTime = false + return session +} diff --git a/vendor/xorm.io/xorm/session_cond.go b/vendor/xorm.io/xorm/session_cond.go new file mode 100644 index 0000000000..25d1714815 --- /dev/null +++ b/vendor/xorm.io/xorm/session_cond.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import "xorm.io/builder" + +// SQL provides raw sql input parameter. When you have a complex SQL statement +// and cannot use Where, Id, In and etc. Methods to describe, you can use SQL. +func (session *Session) SQL(query interface{}, args ...interface{}) *Session { + session.statement.SQL(query, args...) + return session +} + +// Where provides custom query condition. +func (session *Session) Where(query interface{}, args ...interface{}) *Session { + session.statement.Where(query, args...) + return session +} + +// And provides custom query condition. +func (session *Session) And(query interface{}, args ...interface{}) *Session { + session.statement.And(query, args...) + return session +} + +// Or provides custom query condition. +func (session *Session) Or(query interface{}, args ...interface{}) *Session { + session.statement.Or(query, args...) 
+ return session +} + +// ID provides converting id as a query condition +func (session *Session) ID(id interface{}) *Session { + session.statement.ID(id) + return session +} + +// In provides a query string like "id in (1, 2, 3)" +func (session *Session) In(column string, args ...interface{}) *Session { + session.statement.In(column, args...) + return session +} + +// NotIn provides a query string like "id in (1, 2, 3)" +func (session *Session) NotIn(column string, args ...interface{}) *Session { + session.statement.NotIn(column, args...) + return session +} + +// Conds returns session query conditions except auto bean conditions +func (session *Session) Conds() builder.Cond { + return session.statement.Conds() +} diff --git a/vendor/xorm.io/xorm/session_convert.go b/vendor/xorm.io/xorm/session_convert.go new file mode 100644 index 0000000000..a683994731 --- /dev/null +++ b/vendor/xorm.io/xorm/session_convert.go @@ -0,0 +1,529 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "xorm.io/xorm/convert" + "xorm.io/xorm/internal/json" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (session *Session) str2Time(col *schemas.Column, data string) (outTime time.Time, outErr error) { + sdata := strings.TrimSpace(data) + var x time.Time + var err error + + var parseLoc = session.engine.DatabaseTZ + if col.TimeZone != nil { + parseLoc = col.TimeZone + } + + if sdata == utils.ZeroTime0 || sdata == utils.ZeroTime1 { + } else if !strings.ContainsAny(sdata, "- :") { // !nashtsai! has only found that mymysql driver is using this for time type column + // time stamp + sd, err := strconv.ParseInt(sdata, 10, 64) + if err == nil { + x = time.Unix(sd, 0) + //session.engine.logger.Debugf("time(0) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else { + //session.engine.logger.Debugf("time(0) err key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } + } else if len(sdata) > 19 && strings.Contains(sdata, "-") { + x, err = time.ParseInLocation(time.RFC3339Nano, sdata, parseLoc) + session.engine.logger.Debugf("time(1) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + if err != nil { + x, err = time.ParseInLocation("2006-01-02 15:04:05.999999999", sdata, parseLoc) + //session.engine.logger.Debugf("time(2) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } + if err != nil { + x, err = time.ParseInLocation("2006-01-02 15:04:05.9999999 Z07:00", sdata, parseLoc) + //session.engine.logger.Debugf("time(3) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } + } else if len(sdata) == 19 && strings.Contains(sdata, "-") { + x, err = time.ParseInLocation("2006-01-02 15:04:05", sdata, parseLoc) + //session.engine.logger.Debugf("time(4) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else if len(sdata) == 10 && sdata[4] == '-' && sdata[7] == '-' { + x, err = time.ParseInLocation("2006-01-02", sdata, parseLoc) + //session.engine.logger.Debugf("time(5) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else if col.SQLType.Name == schemas.Time { + if strings.Contains(sdata, " ") { + ssd := strings.Split(sdata, " ") + sdata = ssd[1] + } + + sdata = strings.TrimSpace(sdata) + if session.engine.dialect.URI().DBType == schemas.MYSQL && len(sdata) > 8 { + sdata = sdata[len(sdata)-8:] + } + + st := fmt.Sprintf("2006-01-02 %v", 
sdata) + x, err = time.ParseInLocation("2006-01-02 15:04:05", st, parseLoc) + //session.engine.logger.Debugf("time(6) key[%v]: %+v | sdata: [%v]\n", col.FieldName, x, sdata) + } else { + outErr = fmt.Errorf("unsupported time format %v", sdata) + return + } + if err != nil { + outErr = fmt.Errorf("unsupported time format %v: %v", sdata, err) + return + } + outTime = x.In(session.engine.TZLocation) + return +} + +func (session *Session) byte2Time(col *schemas.Column, data []byte) (outTime time.Time, outErr error) { + return session.str2Time(col, string(data)) +} + +// convert a db data([]byte) to a field value +func (session *Session) bytes2Value(col *schemas.Column, fieldValue *reflect.Value, data []byte) error { + if structConvert, ok := fieldValue.Addr().Interface().(convert.Conversion); ok { + return structConvert.FromDB(data) + } + + if structConvert, ok := fieldValue.Interface().(convert.Conversion); ok { + return structConvert.FromDB(data) + } + + var v interface{} + key := col.Name + fieldType := fieldValue.Type() + + switch fieldType.Kind() { + case reflect.Complex64, reflect.Complex128: + x := reflect.New(fieldType) + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) + if err != nil { + return err + } + fieldValue.Set(x.Elem()) + } + case reflect.Slice, reflect.Array, reflect.Map: + v = data + t := fieldType.Elem() + k := t.Kind() + if col.SQLType.IsText() { + x := reflect.New(fieldType) + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) + if err != nil { + return err + } + fieldValue.Set(x.Elem()) + } + } else if col.SQLType.IsBlob() { + if k == reflect.Uint8 { + fieldValue.Set(reflect.ValueOf(v)) + } else { + x := reflect.New(fieldType) + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, x.Interface()) + if err != nil { + return err + } + fieldValue.Set(x.Elem()) + } + } + } else { + return ErrUnSupportedType + } + case reflect.String: + fieldValue.SetString(string(data)) + case reflect.Bool: + v, err := asBool(data) + if err != nil { + return fmt.Errorf("arg %v as bool: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(v)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + sdata := string(data) + var x int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + session.engine.dialect.URI().DBType == schemas.MYSQL { // !nashtsai! TODO dialect needs to provide conversion interface API + if len(data) == 1 { + x = int64(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x, err = strconv.ParseInt(sdata, 16, 64) + } else if strings.HasPrefix(sdata, "0") { + x, err = strconv.ParseInt(sdata, 8, 64) + } else if strings.EqualFold(sdata, "true") { + x = 1 + } else if strings.EqualFold(sdata, "false") { + x = 0 + } else { + x, err = strconv.ParseInt(sdata, 10, 64) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.SetInt(x) + case reflect.Float32, reflect.Float64: + x, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return fmt.Errorf("arg %v as float64: %s", key, err.Error()) + } + fieldValue.SetFloat(x) + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + x, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.SetUint(x) + //Currently only support Time type + case reflect.Struct: + // !! 
增加支持sql.Scanner接口的结构,如sql.NullString + if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { + if err := nulVal.Scan(data); err != nil { + return fmt.Errorf("sql.Scan(%v) failed: %s ", data, err.Error()) + } + } else { + if fieldType.ConvertibleTo(schemas.TimeType) { + x, err := session.byte2Time(col, data) + if err != nil { + return err + } + v = x + fieldValue.Set(reflect.ValueOf(v).Convert(fieldType)) + } else if session.statement.UseCascade { + table, err := session.engine.tagParser.ParseWithCache(*fieldValue) + if err != nil { + return err + } + + // TODO: current only support 1 primary key + if len(table.PrimaryKeys) > 1 { + return errors.New("unsupported composited primary key cascade") + } + + var pk = make(schemas.PK, len(table.PrimaryKeys)) + rawValueType := table.ColumnType(table.PKColumns()[0].FieldName) + pk[0], err = str2PK(string(data), rawValueType) + if err != nil { + return err + } + + if !pk.IsZero() { + // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch + // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne + // property to be fetched lazily + structInter := reflect.New(fieldValue.Type()) + has, err := session.ID(pk).NoCascade().get(structInter.Interface()) + if err != nil { + return err + } + if has { + v = structInter.Elem().Interface() + fieldValue.Set(reflect.ValueOf(v)) + } else { + return errors.New("cascade obj is not exist") + } + } + } + } + case reflect.Ptr: + // !nashtsai! TODO merge duplicated codes above + //typeStr := fieldType.String() + switch fieldType.Elem().Kind() { + // case "*string": + case schemas.StringType.Kind(): + x := string(data) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*bool": + case schemas.BoolType.Kind(): + d := string(data) + v, err := strconv.ParseBool(d) + if err != nil { + return fmt.Errorf("arg %v as bool: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&v).Convert(fieldType)) + // case "*complex64": + case schemas.Complex64Type.Kind(): + var x complex64 + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, &x) + if err != nil { + return err + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + } + // case "*complex128": + case schemas.Complex128Type.Kind(): + var x complex128 + if len(data) > 0 { + err := json.DefaultJSONHandler.Unmarshal(data, &x) + if err != nil { + return err + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + } + // case "*float64": + case schemas.Float64Type.Kind(): + x, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return fmt.Errorf("arg %v as float64: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*float32": + case schemas.Float32Type.Kind(): + var x float32 + x1, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return fmt.Errorf("arg %v as float32: %s", key, err.Error()) + } + x = float32(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint64": + case schemas.Uint64Type.Kind(): + var x uint64 + x, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint": + case schemas.UintType.Kind(): + var x uint + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint(x1) + 
fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint32": + case schemas.Uint32Type.Kind(): + var x uint32 + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint32(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint8": + case schemas.Uint8Type.Kind(): + var x uint8 + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint8(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*uint16": + case schemas.Uint16Type.Kind(): + var x uint16 + x1, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + x = uint16(x1) + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int64": + case schemas.Int64Type.Kind(): + sdata := string(data) + var x int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int64(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x, err = strconv.ParseInt(sdata, 16, 64) + } else if strings.HasPrefix(sdata, "0") { + x, err = strconv.ParseInt(sdata, 8, 64) + } else { + x, err = strconv.ParseInt(sdata, 10, 64) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int": + case schemas.IntType.Kind(): + sdata := string(data) + var x int + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, err = strconv.ParseInt(sdata, 8, 64) + x = int(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int32": + case schemas.Int32Type.Kind(): + sdata := string(data) + var x int32 + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + session.engine.dialect.URI().DBType == schemas.MYSQL { + if len(data) == 1 { + x = int32(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int32(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, err = strconv.ParseInt(sdata, 8, 64) + x = int32(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int32(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int8": + case schemas.Int8Type.Kind(): + sdata := string(data) + var x int8 + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int8(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int8(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, 
err = strconv.ParseInt(sdata, 8, 64) + x = int8(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int8(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*int16": + case schemas.Int16Type.Kind(): + sdata := string(data) + var x int16 + var x1 int64 + var err error + // for mysql, when use bit, it returned \x01 + if col.SQLType.Name == schemas.Bit && + strings.Contains(session.engine.DriverName(), "mysql") { + if len(data) == 1 { + x = int16(data[0]) + } else { + x = 0 + } + } else if strings.HasPrefix(sdata, "0x") { + x1, err = strconv.ParseInt(sdata, 16, 64) + x = int16(x1) + } else if strings.HasPrefix(sdata, "0") { + x1, err = strconv.ParseInt(sdata, 8, 64) + x = int16(x1) + } else { + x1, err = strconv.ParseInt(sdata, 10, 64) + x = int16(x1) + } + if err != nil { + return fmt.Errorf("arg %v as int: %s", key, err.Error()) + } + fieldValue.Set(reflect.ValueOf(&x).Convert(fieldType)) + // case "*SomeStruct": + case reflect.Struct: + switch fieldType { + // case "*.time.Time": + case schemas.PtrTimeType: + x, err := session.byte2Time(col, data) + if err != nil { + return err + } + v = x + fieldValue.Set(reflect.ValueOf(&x)) + default: + if session.statement.UseCascade { + structInter := reflect.New(fieldType.Elem()) + table, err := session.engine.tagParser.ParseWithCache(structInter.Elem()) + if err != nil { + return err + } + + if len(table.PrimaryKeys) > 1 { + return errors.New("unsupported composited primary key cascade") + } + + var pk = make(schemas.PK, len(table.PrimaryKeys)) + rawValueType := table.ColumnType(table.PKColumns()[0].FieldName) + pk[0], err = str2PK(string(data), rawValueType) + if err != nil { + return err + } + + if !pk.IsZero() { + // !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch + // however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne + // property to be fetched lazily + has, err := session.ID(pk).NoCascade().get(structInter.Interface()) + if err != nil { + return err + } + if has { + v = structInter.Interface() + fieldValue.Set(reflect.ValueOf(v)) + } else { + return errors.New("cascade obj is not exist") + } + } + } else { + return fmt.Errorf("unsupported struct type in Scan: %s", fieldValue.Type().String()) + } + } + default: + return fmt.Errorf("unsupported type in Scan: %s", fieldValue.Type().String()) + } + default: + return fmt.Errorf("unsupported type in Scan: %s", fieldValue.Type().String()) + } + + return nil +} diff --git a/vendor/xorm.io/xorm/session_delete.go b/vendor/xorm.io/xorm/session_delete.go new file mode 100644 index 0000000000..13bf791fed --- /dev/null +++ b/vendor/xorm.io/xorm/session_delete.go @@ -0,0 +1,251 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
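Both assignment paths in the conversion code above (`slice2Bean` and `bytes2Value`) probe the destination field for the `convert.Conversion` interface before falling back to reflection, so a field type can own its database encoding end to end. Below is a minimal sketch of such a type, assuming the `FromDB([]byte) error` / `ToDB() ([]byte, error)` method pair implied by these call sites; the `Money` type and its integer-cents encoding are illustrative only, not part of this patch:

```go
package model

import (
	"fmt"
	"strconv"
)

// Money stores an amount as an integer count of cents. Because *Money
// implements the conversion hooks, xorm hands the raw column bytes to
// FromDB instead of attempting reflection-based assignment, and calls
// ToDB when writing the field back out.
type Money int64

// FromDB decodes the driver's raw column bytes into the field.
func (m *Money) FromDB(data []byte) error {
	if len(data) == 0 {
		*m = 0 // NULL or empty column: keep the zero value
		return nil
	}
	v, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return fmt.Errorf("money: cannot parse %q: %w", data, err)
	}
	*m = Money(v)
	return nil
}

// ToDB encodes the field for storage.
func (m Money) ToDB() ([]byte, error) {
	return []byte(strconv.FormatInt(int64(m), 10)), nil
}
```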
+ +package xorm + +import ( + "errors" + "fmt" + "strconv" + + "xorm.io/xorm/caches" + "xorm.io/xorm/schemas" +) + +var ( + // ErrNeedDeletedCond delete needs less one condition error + ErrNeedDeletedCond = errors.New("Delete action needs at least one condition") + + // ErrNotImplemented not implemented + ErrNotImplemented = errors.New("Not implemented") +) + +func (session *Session) cacheDelete(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { + if table == nil || + session.tx != nil { + return ErrCacheFailed + } + + for _, filter := range session.engine.dialect.Filters() { + sqlStr = filter.Do(sqlStr) + } + + newsql := session.statement.ConvertIDSQL(sqlStr) + if newsql == "" { + return ErrCacheFailed + } + + cacher := session.engine.cacherMgr.GetCacher(tableName) + pkColumns := table.PKColumns() + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) + if err != nil { + resultsSlice, err := session.queryBytes(newsql, args...) + if err != nil { + return err + } + ids = make([]schemas.PK, 0) + if len(resultsSlice) > 0 { + for _, data := range resultsSlice { + var id int64 + var pk schemas.PK = make([]interface{}, 0) + for _, col := range pkColumns { + if v, ok := data[col.Name]; !ok { + return errors.New("no id") + } else if col.SQLType.IsText() { + pk = append(pk, string(v)) + } else if col.SQLType.IsNumeric() { + id, err = strconv.ParseInt(string(v), 10, 64) + if err != nil { + return err + } + pk = append(pk, id) + } else { + return errors.New("not supported primary key type") + } + } + ids = append(ids, pk) + } + } + } + + for _, id := range ids { + session.engine.logger.Debugf("[cache] delete cache obj: %v, %v", tableName, id) + sid, err := id.ToString() + if err != nil { + return err + } + cacher.DelBean(tableName, sid) + } + session.engine.logger.Debugf("[cache] clear cache table: %v", tableName) + cacher.ClearIds(tableName) + return nil +} + +// Delete records, bean's non-empty fields are conditions +func (session *Session) Delete(bean interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + if session.statement.LastError != nil { + return 0, session.statement.LastError + } + + if err := session.statement.SetRefBean(bean); err != nil { + return 0, err + } + + executeBeforeClosures(session, bean) + + if processor, ok := interface{}(bean).(BeforeDeleteProcessor); ok { + processor.BeforeDelete() + } + + condSQL, condArgs, err := session.statement.GenConds(bean) + if err != nil { + return 0, err + } + pLimitN := session.statement.LimitN + if len(condSQL) == 0 && (pLimitN == nil || *pLimitN == 0) { + return 0, ErrNeedDeletedCond + } + + var tableNameNoQuote = session.statement.TableName() + var tableName = session.engine.Quote(tableNameNoQuote) + var table = session.statement.RefTable + var deleteSQL string + if len(condSQL) > 0 { + deleteSQL = fmt.Sprintf("DELETE FROM %v WHERE %v", tableName, condSQL) + } else { + deleteSQL = fmt.Sprintf("DELETE FROM %v", tableName) + } + + var orderSQL string + if len(session.statement.OrderStr) > 0 { + orderSQL += fmt.Sprintf(" ORDER BY %s", session.statement.OrderStr) + } + if pLimitN != nil && *pLimitN > 0 { + limitNValue := *pLimitN + orderSQL += fmt.Sprintf(" LIMIT %d", limitNValue) + } + + if len(orderSQL) > 0 { + switch session.engine.dialect.URI().DBType { + case schemas.POSTGRES: + inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + deleteSQL += " AND " + inSQL + } else { + deleteSQL += " WHERE " + inSQL + } + case 
schemas.SQLITE: + inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + deleteSQL += " AND " + inSQL + } else { + deleteSQL += " WHERE " + inSQL + } + // TODO: how to handle delete limit on mssql? + case schemas.MSSQL: + return 0, ErrNotImplemented + default: + deleteSQL += orderSQL + } + } + + var realSQL string + argsForCache := make([]interface{}, 0, len(condArgs)*2) + if session.statement.GetUnscoped() || table.DeletedColumn() == nil { // tag "deleted" is disabled + realSQL = deleteSQL + copy(argsForCache, condArgs) + argsForCache = append(condArgs, argsForCache...) + } else { + // !oinume! sqlStrForCache and argsForCache is needed to behave as executing "DELETE FROM ..." for caches. + copy(argsForCache, condArgs) + argsForCache = append(condArgs, argsForCache...) + + deletedColumn := table.DeletedColumn() + realSQL = fmt.Sprintf("UPDATE %v SET %v = ? WHERE %v", + session.engine.Quote(session.statement.TableName()), + session.engine.Quote(deletedColumn.Name), + condSQL) + + if len(orderSQL) > 0 { + switch session.engine.dialect.URI().DBType { + case schemas.POSTGRES: + inSQL := fmt.Sprintf("ctid IN (SELECT ctid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + realSQL += " AND " + inSQL + } else { + realSQL += " WHERE " + inSQL + } + case schemas.SQLITE: + inSQL := fmt.Sprintf("rowid IN (SELECT rowid FROM %s%s)", tableName, orderSQL) + if len(condSQL) > 0 { + realSQL += " AND " + inSQL + } else { + realSQL += " WHERE " + inSQL + } + // TODO: how to handle delete limit on mssql? + case schemas.MSSQL: + return 0, ErrNotImplemented + default: + realSQL += orderSQL + } + } + + // !oinume! Insert nowTime to the head of session.statement.Params + condArgs = append(condArgs, "") + paramsLen := len(condArgs) + copy(condArgs[1:paramsLen], condArgs[0:paramsLen-1]) + + val, t := session.engine.nowTime(deletedColumn) + condArgs[0] = val + + var colName = deletedColumn.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } + + if cacher := session.engine.GetCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache { + session.cacheDelete(table, tableNameNoQuote, deleteSQL, argsForCache...) + } + + session.statement.RefTable = table + res, err := session.exec(realSQL, condArgs...) + if err != nil { + return 0, err + } + + // handle after delete processors + if session.isAutoCommit { + for _, closure := range session.afterClosures { + closure(bean) + } + if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { + processor.AfterDelete() + } + } else { + lenAfterClosures := len(session.afterClosures) + if lenAfterClosures > 0 { + if value, has := session.afterDeleteBeans[bean]; has && value != nil { + *value = append(*value, session.afterClosures...) + } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + session.afterDeleteBeans[bean] = &afterClosures + } + } else { + if _, ok := interface{}(bean).(AfterDeleteProcessor); ok { + session.afterDeleteBeans[bean] = nil + } + } + } + cleanupProcessorsClosures(&session.afterClosures) + // -- + + return res.RowsAffected() +} diff --git a/vendor/xorm.io/xorm/session_exist.go b/vendor/xorm.io/xorm/session_exist.go new file mode 100644 index 0000000000..e52c618e71 --- /dev/null +++ b/vendor/xorm.io/xorm/session_exist.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+// Exist returns true if the record exists, otherwise it returns false.
+func (session *Session) Exist(bean ...interface{}) (bool, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	if session.statement.LastError != nil {
+		return false, session.statement.LastError
+	}
+
+	sqlStr, args, err := session.statement.GenExistSQL(bean...)
+	if err != nil {
+		return false, err
+	}
+
+	rows, err := session.queryRows(sqlStr, args...)
+	if err != nil {
+		return false, err
+	}
+	defer rows.Close()
+
+	return rows.Next(), nil
+}
diff --git a/vendor/xorm.io/xorm/session_find.go b/vendor/xorm.io/xorm/session_find.go
new file mode 100644
index 0000000000..642093f22a
--- /dev/null
+++ b/vendor/xorm.io/xorm/session_find.go
@@ -0,0 +1,490 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"xorm.io/builder"
+	"xorm.io/xorm/caches"
+	"xorm.io/xorm/internal/statements"
+	"xorm.io/xorm/internal/utils"
+	"xorm.io/xorm/schemas"
+)
+
+const (
+	tpStruct = iota
+	tpNonStruct
+)
+
+// Find retrieves records from a table; condiBean's non-empty fields
+// are used as conditions. beans could be []Struct, []*Struct,
+// map[int64]Struct or map[int64]*Struct.
+func (session *Session) Find(rowsSlicePtr interface{}, condiBean ...interface{}) error {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+	return session.find(rowsSlicePtr, condiBean...)
+}
+
+// FindAndCount finds the results and also returns the count.
+func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	session.autoResetStatement = false
+	err := session.find(rowsSlicePtr, condiBean...)
+	if err != nil {
+		return 0, err
+	}
+
+	sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+	if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map {
+		return 0, errors.New("needs a pointer to a slice or a map")
+	}
+
+	sliceElementType := sliceValue.Type().Elem()
+	if sliceElementType.Kind() == reflect.Ptr {
+		sliceElementType = sliceElementType.Elem()
+	}
+	session.autoResetStatement = true
+
+	if session.statement.SelectStr != "" {
+		session.statement.SelectStr = ""
+	}
+	if session.statement.OrderStr != "" {
+		session.statement.OrderStr = ""
+	}
+	if session.statement.LimitN != nil {
+		session.statement.LimitN = nil
+	}
+	if session.statement.Start > 0 {
+		session.statement.Start = 0
+	}
+
+	// the session has stored the conditions, so we use `unscoped` to avoid a duplicated condition.
+ return session.Unscoped().Count(reflect.New(sliceElementType).Interface()) +} + +func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error { + defer session.resetStatement() + if session.statement.LastError != nil { + return session.statement.LastError + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + var isSlice = sliceValue.Kind() == reflect.Slice + var isMap = sliceValue.Kind() == reflect.Map + if !isSlice && !isMap { + return errors.New("needs a pointer to a slice or a map") + } + + sliceElementType := sliceValue.Type().Elem() + + var tp = tpStruct + if session.statement.RefTable == nil { + if sliceElementType.Kind() == reflect.Ptr { + if sliceElementType.Elem().Kind() == reflect.Struct { + pv := reflect.New(sliceElementType.Elem()) + if err := session.statement.SetRefValue(pv); err != nil { + return err + } + } else { + tp = tpNonStruct + } + } else if sliceElementType.Kind() == reflect.Struct { + pv := reflect.New(sliceElementType) + if err := session.statement.SetRefValue(pv); err != nil { + return err + } + } else { + tp = tpNonStruct + } + } + + var ( + table = session.statement.RefTable + addedTableName = (len(session.statement.JoinStr) > 0) + autoCond builder.Cond + ) + if tp == tpStruct { + if !session.statement.NoAutoCondition && len(condiBean) > 0 { + condTable, err := session.engine.tagParser.Parse(reflect.ValueOf(condiBean[0])) + if err != nil { + return err + } + autoCond, err = session.statement.BuildConds(condTable, condiBean[0], true, true, false, true, addedTableName) + if err != nil { + return err + } + } else { + if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled + autoCond = session.statement.CondDeleted(col) + } + } + } + + // if it's a map with Cols but primary key not in column list, we still need the primary key + if isMap && !session.statement.ColumnMap.IsEmpty() { + for _, k := range session.statement.RefTable.PrimaryKeys { + session.statement.ColumnMap.Add(k) + } + } + + sqlStr, args, err := session.statement.GenFindSQL(autoCond) + if err != nil { + return err + } + + if session.statement.ColumnMap.IsEmpty() && session.canCache() { + if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && + !session.statement.IsDistinct && + !session.statement.GetUnscoped() { + err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...) + if err != ErrCacheFailed { + return err + } + err = nil // !nashtsai! reset err to nil for ErrCacheFailed + session.engine.logger.Warnf("Cache Find Failed") + } + } + + return session.noCacheFind(table, sliceValue, sqlStr, args...) +} + +func (session *Session) noCacheFind(table *schemas.Table, containerValue reflect.Value, sqlStr string, args ...interface{}) error { + rows, err := session.queryRows(sqlStr, args...) 
+ if err != nil { + return err + } + defer rows.Close() + + fields, err := rows.Columns() + if err != nil { + return err + } + + var newElemFunc func(fields []string) reflect.Value + elemType := containerValue.Type().Elem() + var isPointer bool + if elemType.Kind() == reflect.Ptr { + isPointer = true + elemType = elemType.Elem() + } + if elemType.Kind() == reflect.Ptr { + return errors.New("pointer to pointer is not supported") + } + + newElemFunc = func(fields []string) reflect.Value { + switch elemType.Kind() { + case reflect.Slice: + slice := reflect.MakeSlice(elemType, len(fields), len(fields)) + x := reflect.New(slice.Type()) + x.Elem().Set(slice) + return x + case reflect.Map: + mp := reflect.MakeMap(elemType) + x := reflect.New(mp.Type()) + x.Elem().Set(mp) + return x + } + return reflect.New(elemType) + } + + var containerValueSetFunc func(*reflect.Value, schemas.PK) error + + if containerValue.Kind() == reflect.Slice { + containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { + if isPointer { + containerValue.Set(reflect.Append(containerValue, newValue.Elem().Addr())) + } else { + containerValue.Set(reflect.Append(containerValue, newValue.Elem())) + } + return nil + } + } else { + keyType := containerValue.Type().Key() + if len(table.PrimaryKeys) == 0 { + return errors.New("don't support multiple primary key's map has non-slice key type") + } + if len(table.PrimaryKeys) > 1 && keyType.Kind() != reflect.Slice { + return errors.New("don't support multiple primary key's map has non-slice key type") + } + + containerValueSetFunc = func(newValue *reflect.Value, pk schemas.PK) error { + keyValue := reflect.New(keyType) + err := convertPKToValue(table, keyValue.Interface(), pk) + if err != nil { + return err + } + if isPointer { + containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem().Addr()) + } else { + containerValue.SetMapIndex(keyValue.Elem(), newValue.Elem()) + } + return nil + } + } + + if elemType.Kind() == reflect.Struct { + var newValue = newElemFunc(fields) + dataStruct := utils.ReflectValue(newValue.Interface()) + tb, err := session.engine.tagParser.ParseWithCache(dataStruct) + if err != nil { + return err + } + err = session.rows2Beans(rows, fields, tb, newElemFunc, containerValueSetFunc) + rows.Close() + if err != nil { + return err + } + return session.executeProcessors() + } + + for rows.Next() { + var newValue = newElemFunc(fields) + bean := newValue.Interface() + + switch elemType.Kind() { + case reflect.Slice: + err = rows.ScanSlice(bean) + case reflect.Map: + err = rows.ScanMap(bean) + default: + err = rows.Scan(bean) + } + + if err != nil { + return err + } + + if err := containerValueSetFunc(&newValue, nil); err != nil { + return err + } + } + return nil +} + +func convertPKToValue(table *schemas.Table, dst interface{}, pk schemas.PK) error { + cols := table.PKColumns() + if len(cols) == 1 { + return convertAssign(dst, pk[0]) + } + + dst = pk + return nil +} + +func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) { + if !session.canCache() || + utils.IndexNoCase(sqlStr, "having") != -1 || + utils.IndexNoCase(sqlStr, "group by") != -1 { + return ErrCacheFailed + } + + tableName := session.statement.TableName() + cacher := session.engine.cacherMgr.GetCacher(tableName) + if cacher == nil { + return nil + } + + for _, filter := range session.engine.dialect.Filters() { + sqlStr = filter.Do(sqlStr) + } + + newsql := session.statement.ConvertIDSQL(sqlStr) + if newsql == "" { + 
return ErrCacheFailed + } + + table := session.statement.RefTable + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) + if err != nil { + rows, err := session.queryRows(newsql, args...) + if err != nil { + return err + } + defer rows.Close() + + var i int + ids = make([]schemas.PK, 0) + for rows.Next() { + i++ + if i > 500 { + session.engine.logger.Debugf("[cacheFind] ids length > 500, no cache") + return ErrCacheFailed + } + var res = make([]string, len(table.PrimaryKeys)) + err = rows.ScanSlice(&res) + if err != nil { + return err + } + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + pk[i], err = col.ConvertID(res[i]) + if err != nil { + return err + } + } + + ids = append(ids, pk) + } + + session.engine.logger.Debugf("[cache] cache sql: %v, %v, %v, %v, %v", ids, tableName, sqlStr, newsql, args) + err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) + if err != nil { + return err + } + } else { + session.engine.logger.Debugf("[cache] cache hit sql: %v, %v, %v, %v", tableName, sqlStr, newsql, args) + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + + ididxes := make(map[string]int) + var ides []schemas.PK + var temps = make([]interface{}, len(ids)) + + for idx, id := range ids { + sid, err := id.ToString() + if err != nil { + return err + } + bean := cacher.GetBean(tableName, sid) + + // fix issue #894 + isHit := func() (ht bool) { + if bean == nil { + ht = false + return + } + ckb := reflect.ValueOf(bean).Elem().Type() + ht = ckb == t + if !ht && t.Kind() == reflect.Ptr { + ht = t.Elem() == ckb + } + return + } + if !isHit() { + ides = append(ides, id) + ididxes[sid] = idx + } else { + session.engine.logger.Debugf("[cache] cache hit bean: %v, %v, %v", tableName, id, bean) + + pk, err := table.IDOfV(reflect.ValueOf(bean)) + if err != nil { + return err + } + + xid, err := pk.ToString() + if err != nil { + return err + } + + if sid != xid { + session.engine.logger.Errorf("[cache] error cache: %v, %v, %v", xid, sid, bean) + return ErrCacheFailed + } + temps[idx] = bean + } + } + + if len(ides) > 0 { + slices := reflect.New(reflect.SliceOf(t)) + beans := slices.Interface() + + statement := session.statement + session.statement = statements.NewStatement( + session.engine.dialect, + session.engine.tagParser, + session.engine.DatabaseTZ, + ) + if len(table.PrimaryKeys) == 1 { + ff := make([]interface{}, 0, len(ides)) + for _, ie := range ides { + ff = append(ff, ie[0]) + } + + session.In("`"+table.PrimaryKeys[0]+"`", ff...) 
+ } else { + for _, ie := range ides { + cond := builder.NewCond() + for i, name := range table.PrimaryKeys { + cond = cond.And(builder.Eq{"`" + name + "`": ie[i]}) + } + session.Or(cond) + } + } + + err = session.NoCache().Table(tableName).find(beans) + if err != nil { + return err + } + session.statement = statement + + vs := reflect.Indirect(reflect.ValueOf(beans)) + for i := 0; i < vs.Len(); i++ { + rv := vs.Index(i) + if rv.Kind() != reflect.Ptr { + rv = rv.Addr() + } + id, err := table.IDOfV(rv) + if err != nil { + return err + } + sid, err := id.ToString() + if err != nil { + return err + } + + bean := rv.Interface() + temps[ididxes[sid]] = bean + session.engine.logger.Debugf("[cache] cache bean: %v, %v, %v, %v", tableName, id, bean, temps) + cacher.PutBean(tableName, sid, bean) + } + } + + for j := 0; j < len(temps); j++ { + bean := temps[j] + if bean == nil { + session.engine.logger.Warnf("[cache] cache no hit: %v, %v, %v", tableName, ids[j], temps) + // return errors.New("cache error") // !nashtsai! no need to return error, but continue instead + continue + } + if sliceValue.Kind() == reflect.Slice { + if t.Kind() == reflect.Ptr { + sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean))) + } else { + sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean)))) + } + } else if sliceValue.Kind() == reflect.Map { + var key = ids[j] + keyType := sliceValue.Type().Key() + var ikey interface{} + if len(key) == 1 { + ikey, err = str2PK(fmt.Sprintf("%v", key[0]), keyType) + if err != nil { + return err + } + } else { + if keyType.Kind() != reflect.Slice { + return errors.New("table have multiple primary keys, key is not schemas.PK or slice") + } + ikey = key + } + + if t.Kind() == reflect.Ptr { + sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean)) + } else { + sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean))) + } + } + } + + return nil +} diff --git a/vendor/xorm.io/xorm/session_get.go b/vendor/xorm.io/xorm/session_get.go new file mode 100644 index 0000000000..afedcd1f08 --- /dev/null +++ b/vendor/xorm.io/xorm/session_get.go @@ -0,0 +1,358 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
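Taken together, `find`, `noCacheFind`, and `cacheFind` above mean `Find` accepts either a slice or a map container, with map keys derived from the primary key. A short usage sketch follows; the `User` model and the initialized `*xorm.Engine` are assumptions for illustration:

```go
package main

import (
	"log"

	"xorm.io/xorm"
)

// User is a hypothetical model; xorm treats the Id field as the
// auto-increment primary key by default.
type User struct {
	Id   int64
	Name string
	Age  int
}

func findExamples(engine *xorm.Engine) {
	// Slice container: one element appended per row.
	var users []User
	if err := engine.Where("age > ?", 18).Find(&users); err != nil {
		log.Fatal(err)
	}

	// Map container: keyed by the primary key, here User.Id.
	byID := make(map[int64]*User)
	if err := engine.Find(&byID); err != nil {
		log.Fatal(err)
	}

	// A condiBean's non-empty fields become equality conditions (age = 18).
	var sameAge []User
	if err := engine.Find(&sameAge, &User{Age: 18}); err != nil {
		log.Fatal(err)
	}
}
```

Note that for tables with a composite primary key the map key must itself be a slice type, which is what the `containerValueSetFunc` checks above enforce.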
+ +package xorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strconv" + + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +// Get retrieve one record from database, bean's non-empty fields +// will be as conditions +func (session *Session) Get(bean interface{}) (bool, error) { + if session.isAutoClose { + defer session.Close() + } + return session.get(bean) +} + +func (session *Session) get(bean interface{}) (bool, error) { + defer session.resetStatement() + + if session.statement.LastError != nil { + return false, session.statement.LastError + } + + beanValue := reflect.ValueOf(bean) + if beanValue.Kind() != reflect.Ptr { + return false, errors.New("needs a pointer to a value") + } else if beanValue.Elem().Kind() == reflect.Ptr { + return false, errors.New("a pointer to a pointer is not allowed") + } + + if beanValue.Elem().Kind() == reflect.Struct { + if err := session.statement.SetRefBean(bean); err != nil { + return false, err + } + } + + var sqlStr string + var args []interface{} + var err error + + if session.statement.RawSQL == "" { + if len(session.statement.TableName()) <= 0 { + return false, ErrTableNotFound + } + session.statement.Limit(1) + sqlStr, args, err = session.statement.GenGetSQL(bean) + if err != nil { + return false, err + } + } else { + sqlStr = session.statement.GenRawSQL() + args = session.statement.RawParams + } + + table := session.statement.RefTable + + if session.statement.ColumnMap.IsEmpty() && session.canCache() && beanValue.Elem().Kind() == reflect.Struct { + if cacher := session.engine.GetCacher(session.statement.TableName()); cacher != nil && + !session.statement.GetUnscoped() { + has, err := session.cacheGet(bean, sqlStr, args...) + if err != ErrCacheFailed { + return has, err + } + } + } + + context := session.statement.Context + if context != nil { + res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args)) + if res != nil { + session.engine.logger.Debugf("hit context cache: %s", sqlStr) + + structValue := reflect.Indirect(reflect.ValueOf(bean)) + structValue.Set(reflect.Indirect(reflect.ValueOf(res))) + session.lastSQL = "" + session.lastSQLArgs = nil + return true, nil + } + } + + has, err := session.nocacheGet(beanValue.Elem().Kind(), table, bean, sqlStr, args...) + if err != nil || !has { + return has, err + } + + if context != nil { + context.Put(fmt.Sprintf("%v-%v", sqlStr, args), bean) + } + + return true, nil +} + +func (session *Session) nocacheGet(beanKind reflect.Kind, table *schemas.Table, bean interface{}, sqlStr string, args ...interface{}) (bool, error) { + rows, err := session.queryRows(sqlStr, args...) 
+ if err != nil { + return false, err + } + defer rows.Close() + + if !rows.Next() { + if rows.Err() != nil { + return false, rows.Err() + } + return false, nil + } + + switch bean.(type) { + case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString: + return true, rows.Scan(&bean) + case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString: + return true, rows.Scan(bean) + case *string: + var res sql.NullString + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*string)) = res.String + } + return true, nil + case *int: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int)) = int(res.Int64) + } + return true, nil + case *int8: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int8)) = int8(res.Int64) + } + return true, nil + case *int16: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int16)) = int16(res.Int64) + } + return true, nil + case *int32: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int32)) = int32(res.Int64) + } + return true, nil + case *int64: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*int64)) = int64(res.Int64) + } + return true, nil + case *uint: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint)) = uint(res.Int64) + } + return true, nil + case *uint8: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint8)) = uint8(res.Int64) + } + return true, nil + case *uint16: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint16)) = uint16(res.Int64) + } + return true, nil + case *uint32: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint32)) = uint32(res.Int64) + } + return true, nil + case *uint64: + var res sql.NullInt64 + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*uint64)) = uint64(res.Int64) + } + return true, nil + case *bool: + var res sql.NullBool + if err := rows.Scan(&res); err != nil { + return true, err + } + if res.Valid { + *(bean.(*bool)) = res.Bool + } + return true, nil + } + + switch beanKind { + case reflect.Struct: + fields, err := rows.Columns() + if err != nil { + // WARN: Alougth rows return true, but get fields failed + return true, err + } + + scanResults, err := session.row2Slice(rows, fields, bean) + if err != nil { + return false, err + } + // close it before convert data + rows.Close() + + dataStruct := utils.ReflectValue(bean) + _, err = session.slice2Bean(scanResults, fields, bean, &dataStruct, table) + if err != nil { + return true, err + } + + return true, session.executeProcessors() + case reflect.Slice: + err = rows.ScanSlice(bean) + case reflect.Map: + err = rows.ScanMap(bean) + case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + err = rows.Scan(bean) + default: + err = rows.Scan(bean) + } + + return true, err +} + +func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interface{}) 
(has bool, err error) { + // if has no reftable, then don't use cache currently + if !session.canCache() { + return false, ErrCacheFailed + } + + for _, filter := range session.engine.dialect.Filters() { + sqlStr = filter.Do(sqlStr) + } + newsql := session.statement.ConvertIDSQL(sqlStr) + if newsql == "" { + return false, ErrCacheFailed + } + + tableName := session.statement.TableName() + cacher := session.engine.cacherMgr.GetCacher(tableName) + + session.engine.logger.Debugf("[cache] Get SQL: %s, %v", newsql, args) + table := session.statement.RefTable + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args) + if err != nil { + var res = make([]string, len(table.PrimaryKeys)) + rows, err := session.NoCache().queryRows(newsql, args...) + if err != nil { + return false, err + } + defer rows.Close() + + if rows.Next() { + err = rows.ScanSlice(&res) + if err != nil { + return false, err + } + } else { + return false, ErrCacheFailed + } + + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + if col.SQLType.IsText() { + pk[i] = res[i] + } else if col.SQLType.IsNumeric() { + n, err := strconv.ParseInt(res[i], 10, 64) + if err != nil { + return false, err + } + pk[i] = n + } else { + return false, errors.New("unsupported") + } + } + + ids = []schemas.PK{pk} + session.engine.logger.Debugf("[cache] cache ids: %s, %v", newsql, ids) + err = caches.PutCacheSql(cacher, ids, tableName, newsql, args) + if err != nil { + return false, err + } + } else { + session.engine.logger.Debugf("[cache] cache hit: %s, %v", newsql, ids) + } + + if len(ids) > 0 { + structValue := reflect.Indirect(reflect.ValueOf(bean)) + id := ids[0] + session.engine.logger.Debugf("[cache] get bean: %s, %v", tableName, id) + sid, err := id.ToString() + if err != nil { + return false, err + } + cacheBean := cacher.GetBean(tableName, sid) + if cacheBean == nil { + cacheBean = bean + has, err = session.nocacheGet(reflect.Struct, table, cacheBean, sqlStr, args...) + if err != nil || !has { + return has, err + } + + session.engine.logger.Debugf("[cache] cache bean: %s, %v, %v", tableName, id, cacheBean) + cacher.PutBean(tableName, sid, cacheBean) + } else { + session.engine.logger.Debugf("[cache] cache hit: %s, %v, %v", tableName, id, cacheBean) + has = true + } + structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean))) + + return has, nil + } + return false, nil +} diff --git a/vendor/xorm.io/xorm/session_insert.go b/vendor/xorm.io/xorm/session_insert.go new file mode 100644 index 0000000000..5f9681512e --- /dev/null +++ b/vendor/xorm.io/xorm/session_insert.go @@ -0,0 +1,643 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
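+
+// Editor's illustration (not upstream xorm code): a minimal sketch of typical
+// Insert usage, assuming an initialized *xorm.Engine named "engine" and a
+// mapped struct User:
+//
+//	affected, err := engine.Insert(&User{Name: "alice"})              // one bean
+//	affected, err = engine.Insert(&[]User{{Name: "a"}, {Name: "b"}})  // batch
+//	_, _ = affected, err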
+ +package xorm + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +// ErrNoElementsOnSlice represents an error there is no element when insert +var ErrNoElementsOnSlice = errors.New("No element on slice when insert") + +// Insert insert one or more beans +func (session *Session) Insert(beans ...interface{}) (int64, error) { + var affected int64 + var err error + + if session.isAutoClose { + defer session.Close() + } + + session.autoResetStatement = false + defer func() { + session.autoResetStatement = true + session.resetStatement() + }() + + for _, bean := range beans { + switch bean.(type) { + case map[string]interface{}: + cnt, err := session.insertMapInterface(bean.(map[string]interface{})) + if err != nil { + return affected, err + } + affected += cnt + case []map[string]interface{}: + s := bean.([]map[string]interface{}) + for i := 0; i < len(s); i++ { + cnt, err := session.insertMapInterface(s[i]) + if err != nil { + return affected, err + } + affected += cnt + } + case map[string]string: + cnt, err := session.insertMapString(bean.(map[string]string)) + if err != nil { + return affected, err + } + affected += cnt + case []map[string]string: + s := bean.([]map[string]string) + for i := 0; i < len(s); i++ { + cnt, err := session.insertMapString(s[i]) + if err != nil { + return affected, err + } + affected += cnt + } + default: + sliceValue := reflect.Indirect(reflect.ValueOf(bean)) + if sliceValue.Kind() == reflect.Slice { + size := sliceValue.Len() + if size <= 0 { + return 0, ErrNoElementsOnSlice + } + + cnt, err := session.innerInsertMulti(bean) + if err != nil { + return affected, err + } + affected += cnt + } else { + cnt, err := session.innerInsert(bean) + if err != nil { + return affected, err + } + affected += cnt + } + } + } + + return affected, err +} + +func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error) { + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + if sliceValue.Kind() != reflect.Slice { + return 0, errors.New("needs a pointer to a slice") + } + + if sliceValue.Len() <= 0 { + return 0, errors.New("could not insert a empty slice") + } + + if err := session.statement.SetRefBean(sliceValue.Index(0).Interface()); err != nil { + return 0, err + } + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var ( + table = session.statement.RefTable + size = sliceValue.Len() + colNames []string + colMultiPlaces []string + args []interface{} + cols []*schemas.Column + ) + + for i := 0; i < size; i++ { + v := sliceValue.Index(i) + var vv reflect.Value + switch v.Kind() { + case reflect.Interface: + vv = reflect.Indirect(v.Elem()) + default: + vv = reflect.Indirect(v) + } + elemValue := v.Interface() + var colPlaces []string + + // handle BeforeInsertProcessor + // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi?? 
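+		// Editor's note: both the session-level before-closures and each bean's
+		// own BeforeInsert hook run once per slice element, before SQL is built.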
+ for _, closure := range session.beforeClosures { + closure(elemValue) + } + + if processor, ok := interface{}(elemValue).(BeforeInsertProcessor); ok { + processor.BeforeInsert() + } + // -- + + for _, col := range table.Columns() { + ptrFieldValue, err := col.ValueOfV(&vv) + if err != nil { + return 0, err + } + fieldValue := *ptrFieldValue + if col.IsAutoIncrement && utils.IsZero(fieldValue.Interface()) { + continue + } + if col.MapType == schemas.ONLYFROMDB { + continue + } + if col.IsDeleted { + continue + } + if session.statement.OmitColumnMap.Contain(col.Name) { + continue + } + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { + continue + } + if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime { + val, t := session.engine.nowTime(col) + args = append(args, val) + + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } else if col.IsVersion && session.statement.CheckVersion { + args = append(args, 1) + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnInt(bean, col, 1) + }) + } else { + arg, err := session.statement.Value2Interface(col, fieldValue) + if err != nil { + return 0, err + } + args = append(args, arg) + } + + if i == 0 { + colNames = append(colNames, col.Name) + cols = append(cols, col) + } + colPlaces = append(colPlaces, "?") + } + + colMultiPlaces = append(colMultiPlaces, strings.Join(colPlaces, ", ")) + } + cleanupProcessorsClosures(&session.beforeClosures) + + quoter := session.engine.dialect.Quoter() + var sql string + colStr := quoter.Join(colNames, ",") + if session.engine.dialect.URI().DBType == schemas.ORACLE { + temp := fmt.Sprintf(") INTO %s (%v) VALUES (", + quoter.Quote(tableName), + colStr) + sql = fmt.Sprintf("INSERT ALL INTO %s (%v) VALUES (%v) SELECT 1 FROM DUAL", + quoter.Quote(tableName), + colStr, + strings.Join(colMultiPlaces, temp)) + } else { + sql = fmt.Sprintf("INSERT INTO %s (%v) VALUES (%v)", + quoter.Quote(tableName), + colStr, + strings.Join(colMultiPlaces, "),(")) + } + res, err := session.exec(sql, args...) + if err != nil { + return 0, err + } + + session.cacheInsert(tableName) + + lenAfterClosures := len(session.afterClosures) + for i := 0; i < size; i++ { + elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface() + + // handle AfterInsertProcessor + if session.isAutoCommit { + // !nashtsai! does user expect it's same slice to passed closure when using Before()/After() when insert multi?? + for _, closure := range session.afterClosures { + closure(elemValue) + } + if processor, ok := elemValue.(AfterInsertProcessor); ok { + processor.AfterInsert() + } + } else { + if lenAfterClosures > 0 { + if value, has := session.afterInsertBeans[elemValue]; has && value != nil { + *value = append(*value, session.afterClosures...) 
+ } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + session.afterInsertBeans[elemValue] = &afterClosures + } + } else { + if _, ok := elemValue.(AfterInsertProcessor); ok { + session.afterInsertBeans[elemValue] = nil + } + } + } + } + + cleanupProcessorsClosures(&session.afterClosures) + return res.RowsAffected() +} + +// InsertMulti insert multiple records +func (session *Session) InsertMulti(rowsSlicePtr interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) + if sliceValue.Kind() != reflect.Slice { + return 0, ErrPtrSliceType + } + + if sliceValue.Len() <= 0 { + return 0, ErrNoElementsOnSlice + } + + return session.innerInsertMulti(rowsSlicePtr) +} + +func (session *Session) innerInsert(bean interface{}) (int64, error) { + if err := session.statement.SetRefBean(bean); err != nil { + return 0, err + } + if len(session.statement.TableName()) <= 0 { + return 0, ErrTableNotFound + } + + // handle BeforeInsertProcessor + for _, closure := range session.beforeClosures { + closure(bean) + } + cleanupProcessorsClosures(&session.beforeClosures) // cleanup after used + + if processor, ok := interface{}(bean).(BeforeInsertProcessor); ok { + processor.BeforeInsert() + } + + var tableName = session.statement.TableName() + table := session.statement.RefTable + + colNames, args, err := session.genInsertColumns(bean) + if err != nil { + return 0, err + } + + sqlStr, args, err := session.statement.GenInsertSQL(colNames, args) + if err != nil { + return 0, err + } + + handleAfterInsertProcessorFunc := func(bean interface{}) { + if session.isAutoCommit { + for _, closure := range session.afterClosures { + closure(bean) + } + if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { + processor.AfterInsert() + } + } else { + lenAfterClosures := len(session.afterClosures) + if lenAfterClosures > 0 { + if value, has := session.afterInsertBeans[bean]; has && value != nil { + *value = append(*value, session.afterClosures...) + } else { + afterClosures := make([]func(interface{}), lenAfterClosures) + copy(afterClosures, session.afterClosures) + session.afterInsertBeans[bean] = &afterClosures + } + + } else { + if _, ok := interface{}(bean).(AfterInsertProcessor); ok { + session.afterInsertBeans[bean] = nil + } + } + } + cleanupProcessorsClosures(&session.afterClosures) // cleanup after used + } + + // for postgres, many of them didn't implement lastInsertId, so we should + // implemented it ourself. + if session.engine.dialect.URI().DBType == schemas.ORACLE && len(table.AutoIncrement) > 0 { + res, err := session.queryBytes("select seq_atable.currval from dual", args...) 
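+	// Editor's note: Oracle exposes no LastInsertId, so the generated key is read
+	// back by querying the sequence's current value; "seq_atable" appears to be a
+	// hardcoded sequence name in this upstream code, not one xorm derives.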
+ if err != nil { + return 0, err + } + + defer handleAfterInsertProcessorFunc(bean) + + session.cacheInsert(tableName) + + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if len(res) < 1 { + return 0, errors.New("insert no error but not returned id") + } + + idByte := res[0][table.AutoIncrement] + id, err := strconv.ParseInt(string(idByte), 10, 64) + if err != nil || id <= 0 { + return 1, err + } + + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { + return 1, nil + } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return 1, nil + } else if len(table.AutoIncrement) > 0 && (session.engine.dialect.URI().DBType == schemas.POSTGRES || + session.engine.dialect.URI().DBType == schemas.MSSQL) { + res, err := session.queryBytes(sqlStr, args...) + + if err != nil { + return 0, err + } + defer handleAfterInsertProcessorFunc(bean) + + session.cacheInsert(tableName) + + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if len(res) < 1 { + return 0, errors.New("insert successfully but not returned id") + } + + idByte := res[0][table.AutoIncrement] + id, err := strconv.ParseInt(string(idByte), 10, 64) + if err != nil || id <= 0 { + return 1, err + } + + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { + return 1, nil + } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return 1, nil + } + + res, err := session.exec(sqlStr, args...) + if err != nil { + return 0, err + } + + defer handleAfterInsertProcessorFunc(bean) + + session.cacheInsert(tableName) + + if table.Version != "" && session.statement.CheckVersion { + verValue, err := table.VersionColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else if verValue.IsValid() && verValue.CanSet() { + session.incrVersionFieldValue(verValue) + } + } + + if table.AutoIncrement == "" { + return res.RowsAffected() + } + + var id int64 + id, err = res.LastInsertId() + if err != nil || id <= 0 { + return res.RowsAffected() + } + + aiValue, err := table.AutoIncrColumn().ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } + + if aiValue == nil || !aiValue.IsValid() || !aiValue.CanSet() { + return res.RowsAffected() + } + + aiValue.Set(int64ToIntValue(id, aiValue.Type())) + + return res.RowsAffected() +} + +// InsertOne insert only one struct into database as a record. +// The in parameter bean must a struct or a point to struct. 
The return +// parameter is inserted and error +func (session *Session) InsertOne(bean interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + return session.innerInsert(bean) +} + +func (session *Session) cacheInsert(table string) error { + if !session.statement.UseCache { + return nil + } + cacher := session.engine.cacherMgr.GetCacher(table) + if cacher == nil { + return nil + } + session.engine.logger.Debugf("[cache] clear SQL: %v", table) + cacher.ClearIds(table) + return nil +} + +// genInsertColumns generates insert needed columns +func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) { + table := session.statement.RefTable + colNames := make([]string, 0, len(table.ColumnsSeq())) + args := make([]interface{}, 0, len(table.ColumnsSeq())) + + for _, col := range table.Columns() { + if col.MapType == schemas.ONLYFROMDB { + continue + } + + if col.IsDeleted { + continue + } + + if session.statement.OmitColumnMap.Contain(col.Name) { + continue + } + + if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) { + continue + } + + if session.statement.IncrColumns.IsColExist(col.Name) { + continue + } else if session.statement.DecrColumns.IsColExist(col.Name) { + continue + } else if session.statement.ExprColumns.IsColExist(col.Name) { + continue + } + + fieldValuePtr, err := col.ValueOf(bean) + if err != nil { + return nil, nil, err + } + fieldValue := *fieldValuePtr + + if col.IsAutoIncrement && utils.IsValueZero(fieldValue) { + continue + } + + // !evalphobia! set fieldValue as nil when column is nullable and zero-value + if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok { + if col.Nullable && utils.IsValueZero(fieldValue) { + var nilValue *int + fieldValue = reflect.ValueOf(nilValue) + } + } + + if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ { + // if time is non-empty, then set to auto time + val, t := session.engine.nowTime(col) + args = append(args, val) + + var colName = col.Name + session.afterClosures = append(session.afterClosures, func(bean interface{}) { + col := table.GetColumn(colName) + setColumnTime(bean, col, t) + }) + } else if col.IsVersion && session.statement.CheckVersion { + args = append(args, 1) + } else { + arg, err := session.statement.Value2Interface(col, fieldValue) + if err != nil { + return colNames, args, err + } + args = append(args, arg) + } + + colNames = append(colNames, col.Name) + } + return colNames, args, nil +} + +func (session *Session) insertMapInterface(m map[string]interface{}) (int64, error) { + if len(m) == 0 { + return 0, ErrParamsType + } + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var columns = make([]string, 0, len(m)) + exprs := session.statement.ExprColumns + for k := range m { + if !exprs.IsColExist(k) { + columns = append(columns, k) + } + } + sort.Strings(columns) + + var args = make([]interface{}, 0, len(m)) + for _, colName := range columns { + args = append(args, m[colName]) + } + + return session.insertMap(columns, args) +} + +func (session *Session) insertMapString(m map[string]string) (int64, error) { + if len(m) == 0 { + return 0, ErrParamsType + } + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var columns = make([]string, 0, len(m)) + exprs := session.statement.ExprColumns + for k := range m { + if !exprs.IsColExist(k) { 
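+			// Editor's note: only plain value columns are collected here; keys
+			// bound to SQL expression columns are rendered by the statement itself.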
+			columns = append(columns, k)
+		}
+	}
+
+	sort.Strings(columns)
+
+	var args = make([]interface{}, 0, len(m))
+	for _, colName := range columns {
+		args = append(args, m[colName])
+	}
+
+	return session.insertMap(columns, args)
+}
+
+func (session *Session) insertMap(columns []string, args []interface{}) (int64, error) {
+	tableName := session.statement.TableName()
+	if len(tableName) <= 0 {
+		return 0, ErrTableNotFound
+	}
+
+	sql, args, err := session.statement.GenInsertMapSQL(columns, args)
+	if err != nil {
+		return 0, err
+	}
+
+	if err := session.cacheInsert(tableName); err != nil {
+		return 0, err
+	}
+
+	res, err := session.exec(sql, args...)
+	if err != nil {
+		return 0, err
+	}
+	affected, err := res.RowsAffected()
+	if err != nil {
+		return 0, err
+	}
+	return affected, nil
+}
diff --git a/vendor/xorm.io/xorm/session_iterate.go b/vendor/xorm.io/xorm/session_iterate.go
new file mode 100644
index 0000000000..8cab8f48f4
--- /dev/null
+++ b/vendor/xorm.io/xorm/session_iterate.go
@@ -0,0 +1,105 @@
+// Copyright 2016 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xorm
+
+import (
+	"reflect"
+
+	"xorm.io/xorm/internal/utils"
+)
+
+// IterFunc is only used by Iterate
+type IterFunc func(idx int, bean interface{}) error
+
+// Rows returns a sql.Rows-compatible Rows object, a forward-only iterator for
+// walking records one by one; the bean's non-empty fields are used as conditions.
+func (session *Session) Rows(bean interface{}) (*Rows, error) {
+	return newRows(session, bean)
+}
+
+// Iterate handles records from the table one by one; the bean's non-empty
+// fields are used as conditions.
+func (session *Session) Iterate(bean interface{}, fun IterFunc) error {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	if session.statement.LastError != nil {
+		return session.statement.LastError
+	}
+
+	if session.statement.BufferSize > 0 {
+		return session.bufferIterate(bean, fun)
+	}
+
+	rows, err := session.Rows(bean)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	i := 0
+	for rows.Next() {
+		b := reflect.New(rows.beanType).Interface()
+		err = rows.Scan(b)
+		if err != nil {
+			return err
+		}
+		err = fun(i, b)
+		if err != nil {
+			return err
+		}
+		i++
+	}
+	return err
+}
+
+// BufferSize sets the buffer size used by Iterate
+func (session *Session) BufferSize(size int) *Session {
+	session.statement.BufferSize = size
+	return session
+}
+
+func (session *Session) bufferIterate(bean interface{}, fun IterFunc) error {
+	var bufferSize = session.statement.BufferSize
+	var pLimitN = session.statement.LimitN
+	if pLimitN != nil && bufferSize > *pLimitN {
+		bufferSize = *pLimitN
+	}
+	var start = session.statement.Start
+	v := utils.ReflectValue(bean)
+	sliceType := reflect.SliceOf(v.Type())
+	var idx = 0
+	session.autoResetStatement = false
+	defer func() {
+		session.autoResetStatement = true
+	}()
+
+	for bufferSize > 0 {
+		slice := reflect.New(sliceType)
+		if err := session.NoCache().Limit(bufferSize, start).find(slice.Interface(), bean); err != nil {
+			return err
+		}
+
+		for i := 0; i < slice.Elem().Len(); i++ {
+			if err := fun(idx, slice.Elem().Index(i).Addr().Interface()); err != nil {
+				return err
+			}
+			idx++
+		}
+
+		if bufferSize > slice.Elem().Len() {
+			break
+		}
+
+		start = start + slice.Elem().Len()
+		if pLimitN != nil && start+bufferSize > *pLimitN {
+			bufferSize = *pLimitN - start
+		}
+	}
+
+	return nil
+}
diff
--git a/vendor/xorm.io/xorm/session_query.go b/vendor/xorm.io/xorm/session_query.go new file mode 100644 index 0000000000..1213646611 --- /dev/null +++ b/vendor/xorm.io/xorm/session_query.go @@ -0,0 +1,257 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "fmt" + "reflect" + "strconv" + "time" + + "xorm.io/xorm/core" + "xorm.io/xorm/schemas" +) + +// Query runs a raw sql and return records as []map[string][]byte +func (session *Session) Query(sqlOrArgs ...interface{}) ([]map[string][]byte, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + return session.queryBytes(sqlStr, args...) +} + +func value2String(rawValue *reflect.Value) (str string, err error) { + aa := reflect.TypeOf((*rawValue).Interface()) + vv := reflect.ValueOf((*rawValue).Interface()) + switch aa.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + str = strconv.FormatInt(vv.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + str = strconv.FormatUint(vv.Uint(), 10) + case reflect.Float32, reflect.Float64: + str = strconv.FormatFloat(vv.Float(), 'f', -1, 64) + case reflect.String: + str = vv.String() + case reflect.Array, reflect.Slice: + switch aa.Elem().Kind() { + case reflect.Uint8: + data := rawValue.Interface().([]byte) + str = string(data) + if str == "\x00" { + str = "0" + } + default: + err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) + } + // time type + case reflect.Struct: + if aa.ConvertibleTo(schemas.TimeType) { + str = vv.Convert(schemas.TimeType).Interface().(time.Time).Format(time.RFC3339Nano) + } else { + err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) + } + case reflect.Bool: + str = strconv.FormatBool(vv.Bool()) + case reflect.Complex128, reflect.Complex64: + str = fmt.Sprintf("%v", vv.Complex()) + /* TODO: unsupported types below + case reflect.Map: + case reflect.Ptr: + case reflect.Uintptr: + case reflect.UnsafePointer: + case reflect.Chan, reflect.Func, reflect.Interface: + */ + default: + err = fmt.Errorf("Unsupported struct type %v", vv.Type().Name()) + } + return +} + +func row2mapStr(rows *core.Rows, fields []string) (resultsMap map[string]string, err error) { + result := make(map[string]string) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for ii, key := range fields { + rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii])) + // if row is null then as empty string + if rawValue.Interface() == nil { + result[key] = "" + continue + } + + if data, err := value2String(&rawValue); err == nil { + result[key] = data + } else { + return nil, err + } + } + return result, nil +} + +func row2sliceStr(rows *core.Rows, fields []string) (results []string, err error) { + result := make([]string, 0, len(fields)) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for i := 
0; i < len(fields); i++ { + rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[i])) + // if row is null then as empty string + if rawValue.Interface() == nil { + result = append(result, "") + continue + } + + if data, err := value2String(&rawValue); err == nil { + result = append(result, data) + } else { + return nil, err + } + } + return result, nil +} + +func rows2Strings(rows *core.Rows) (resultsSlice []map[string]string, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + result, err := row2mapStr(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, result) + } + + return resultsSlice, nil +} + +func rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + record, err := row2sliceStr(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, record) + } + + return resultsSlice, nil +} + +// QueryString runs a raw sql and return records as []map[string]string +func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + rows, err := session.queryRows(sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + return rows2Strings(rows) +} + +// QuerySliceString runs a raw sql and return records as [][]string +func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + rows, err := session.queryRows(sqlStr, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + return rows2SliceString(rows) +} + +func row2mapInterface(rows *core.Rows, fields []string) (resultsMap map[string]interface{}, err error) { + resultsMap = make(map[string]interface{}, len(fields)) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for ii, key := range fields { + resultsMap[key] = reflect.Indirect(reflect.ValueOf(scanResultContainers[ii])).Interface() + } + return +} + +func rows2Interfaces(rows *core.Rows) (resultsSlice []map[string]interface{}, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + result, err := row2mapInterface(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, result) + } + + return resultsSlice, nil +} + +// QueryInterface runs a raw sql and return records as []map[string]interface{} +func (session *Session) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenQuerySQL(sqlOrArgs...) + if err != nil { + return nil, err + } + + rows, err := session.queryRows(sqlStr, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + return rows2Interfaces(rows) +} diff --git a/vendor/xorm.io/xorm/session_raw.go b/vendor/xorm.io/xorm/session_raw.go new file mode 100644 index 0000000000..4cfe297abe --- /dev/null +++ b/vendor/xorm.io/xorm/session_raw.go @@ -0,0 +1,180 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "reflect" + + "xorm.io/xorm/core" +) + +func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) { + for _, filter := range session.engine.dialect.Filters() { + *sqlStr = filter.Do(*sqlStr) + } + + session.lastSQL = *sqlStr + session.lastSQLArgs = paramStr +} + +func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Rows, error) { + defer session.resetStatement() + if session.statement.LastError != nil { + return nil, session.statement.LastError + } + + session.queryPreprocess(&sqlStr, args...) + + session.lastSQL = sqlStr + session.lastSQLArgs = args + + if session.isAutoCommit { + var db *core.DB + if session.sessionType == groupSession { + db = session.engine.engineGroup.Slave().DB() + } else { + db = session.DB() + } + + if session.prepareStmt { + // don't clear stmt since session will cache them + stmt, err := session.doPrepare(db, sqlStr) + if err != nil { + return nil, err + } + + rows, err := stmt.QueryContext(session.ctx, args...) + if err != nil { + return nil, err + } + return rows, nil + } + + rows, err := db.QueryContext(session.ctx, sqlStr, args...) + if err != nil { + return nil, err + } + return rows, nil + } + + rows, err := session.tx.QueryContext(session.ctx, sqlStr, args...) + if err != nil { + return nil, err + } + return rows, nil +} + +func (session *Session) queryRow(sqlStr string, args ...interface{}) *core.Row { + return core.NewRow(session.queryRows(sqlStr, args...)) +} + +func value2Bytes(rawValue *reflect.Value) ([]byte, error) { + str, err := value2String(rawValue) + if err != nil { + return nil, err + } + return []byte(str), nil +} + +func row2map(rows *core.Rows, fields []string) (resultsMap map[string][]byte, err error) { + result := make(map[string][]byte) + scanResultContainers := make([]interface{}, len(fields)) + for i := 0; i < len(fields); i++ { + var scanResultContainer interface{} + scanResultContainers[i] = &scanResultContainer + } + if err := rows.Scan(scanResultContainers...); err != nil { + return nil, err + } + + for ii, key := range fields { + rawValue := reflect.Indirect(reflect.ValueOf(scanResultContainers[ii])) + //if row is null then ignore + if rawValue.Interface() == nil { + result[key] = []byte{} + continue + } + + if data, err := value2Bytes(&rawValue); err == nil { + result[key] = data + } else { + return nil, err // !nashtsai! REVIEW, should return err or just error log? + } + } + return result, nil +} + +func rows2maps(rows *core.Rows) (resultsSlice []map[string][]byte, err error) { + fields, err := rows.Columns() + if err != nil { + return nil, err + } + for rows.Next() { + result, err := row2map(rows, fields) + if err != nil { + return nil, err + } + resultsSlice = append(resultsSlice, result) + } + + return resultsSlice, nil +} + +func (session *Session) queryBytes(sqlStr string, args ...interface{}) ([]map[string][]byte, error) { + rows, err := session.queryRows(sqlStr, args...) 
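+	// Editor's note: queryBytes backs the public Query API; each column value is
+	// rendered to []byte via value2Bytes/value2String.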
+ if err != nil { + return nil, err + } + defer rows.Close() + + return rows2maps(rows) +} + +func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, error) { + defer session.resetStatement() + + session.queryPreprocess(&sqlStr, args...) + + session.lastSQL = sqlStr + session.lastSQLArgs = args + + if !session.isAutoCommit { + return session.tx.ExecContext(session.ctx, sqlStr, args...) + } + + if session.prepareStmt { + stmt, err := session.doPrepare(session.DB(), sqlStr) + if err != nil { + return nil, err + } + + res, err := stmt.ExecContext(session.ctx, args...) + if err != nil { + return nil, err + } + return res, nil + } + + return session.DB().ExecContext(session.ctx, sqlStr, args...) +} + +// Exec raw sql +func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { + if session.isAutoClose { + defer session.Close() + } + + if len(sqlOrArgs) == 0 { + return nil, ErrUnSupportedType + } + + sqlStr, args, err := session.statement.ConvertSQLOrArgs(sqlOrArgs...) + if err != nil { + return nil, err + } + + return session.exec(sqlStr, args...) +} diff --git a/vendor/xorm.io/xorm/session_schema.go b/vendor/xorm.io/xorm/session_schema.go new file mode 100644 index 0000000000..9ccf8abee6 --- /dev/null +++ b/vendor/xorm.io/xorm/session_schema.go @@ -0,0 +1,490 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "bufio" + "database/sql" + "fmt" + "io" + "os" + "strings" + + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +// Ping test if database is ok +func (session *Session) Ping() error { + if session.isAutoClose { + defer session.Close() + } + + session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) + return session.DB().PingContext(session.ctx) +} + +// CreateTable create a table according a bean +func (session *Session) CreateTable(bean interface{}) error { + if session.isAutoClose { + defer session.Close() + } + + return session.createTable(bean) +} + +func (session *Session) createTable(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqlStrs := session.statement.GenCreateTableSQL() + for _, s := range sqlStrs { + _, err := session.exec(s) + if err != nil { + return err + } + } + return nil +} + +// CreateIndexes create indexes +func (session *Session) CreateIndexes(bean interface{}) error { + if session.isAutoClose { + defer session.Close() + } + + return session.createIndexes(bean) +} + +func (session *Session) createIndexes(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqls := session.statement.GenIndexSQL() + for _, sqlStr := range sqls { + _, err := session.exec(sqlStr) + if err != nil { + return err + } + } + return nil +} + +// CreateUniques create uniques +func (session *Session) CreateUniques(bean interface{}) error { + if session.isAutoClose { + defer session.Close() + } + return session.createUniques(bean) +} + +func (session *Session) createUniques(bean interface{}) error { + if err := session.statement.SetRefBean(bean); err != nil { + return err + } + + sqls := session.statement.GenUniqueSQL() + for _, sqlStr := range sqls { + _, err := session.exec(sqlStr) + if err != nil { + return err + } + } + return nil +} + +// DropIndexes drop indexes +func (session *Session) DropIndexes(bean interface{}) error { + if session.isAutoClose { + defer 
session.Close()
+	}
+
+	return session.dropIndexes(bean)
+}
+
+func (session *Session) dropIndexes(bean interface{}) error {
+	if err := session.statement.SetRefBean(bean); err != nil {
+		return err
+	}
+
+	sqls := session.statement.GenDelIndexSQL()
+	for _, sqlStr := range sqls {
+		_, err := session.exec(sqlStr)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// DropTable drops the table if it exists; if the drop fails, it returns the error
+func (session *Session) DropTable(beanOrTableName interface{}) error {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	return session.dropTable(beanOrTableName)
+}
+
+func (session *Session) dropTable(beanOrTableName interface{}) error {
+	tableName := session.engine.TableName(beanOrTableName)
+	sqlStr, checkIfExist := session.engine.dialect.DropTableSQL(session.engine.TableName(tableName, true))
+	if !checkIfExist {
+		exist, err := session.engine.dialect.IsTableExist(session.getQueryer(), session.ctx, tableName)
+		if err != nil {
+			return err
+		}
+		checkIfExist = exist
+	}
+
+	if checkIfExist {
+		_, err := session.exec(sqlStr)
+		return err
+	}
+	return nil
+}
+
+// IsTableExist tests whether a table exists
+func (session *Session) IsTableExist(beanOrTableName interface{}) (bool, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	tableName := session.engine.TableName(beanOrTableName)
+
+	return session.isTableExist(tableName)
+}
+
+func (session *Session) isTableExist(tableName string) (bool, error) {
+	return session.engine.dialect.IsTableExist(session.getQueryer(), session.ctx, tableName)
+}
+
+// IsTableEmpty tests whether the table has any records
+func (session *Session) IsTableEmpty(bean interface{}) (bool, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+	return session.isTableEmpty(session.engine.TableName(bean))
+}
+
+func (session *Session) isTableEmpty(tableName string) (bool, error) {
+	var total int64
+	sqlStr := fmt.Sprintf("select count(*) from %s", session.engine.Quote(session.engine.TableName(tableName, true)))
+	err := session.queryRow(sqlStr).Scan(&total)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			err = nil
+		}
+		return true, err
+	}
+
+	return total == 0, nil
+}
+
+// isIndexExist2 reports whether an index exists for the given columns
+func (session *Session) isIndexExist2(tableName string, cols []string, unique bool) (bool, error) {
+	indexes, err := session.engine.dialect.GetIndexes(session.getQueryer(), session.ctx, tableName)
+	if err != nil {
+		return false, err
+	}
+
+	for _, index := range indexes {
+		if utils.SliceEq(index.Cols, cols) {
+			if unique {
+				return index.Type == schemas.UniqueType, nil
+			}
+			return index.Type == schemas.IndexType, nil
+		}
+	}
+	return false, nil
+}
+
+func (session *Session) addColumn(colName string) error {
+	col := session.statement.RefTable.GetColumn(colName)
+	sql := session.engine.dialect.AddColumnSQL(session.statement.TableName(), col)
+	_, err := session.exec(sql)
+	return err
+}
+
+func (session *Session) addIndex(tableName, idxName string) error {
+	index := session.statement.RefTable.Indexes[idxName]
+	sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index)
+	_, err := session.exec(sqlStr)
+	return err
+}
+
+func (session *Session) addUnique(tableName, uqeName string) error {
+	index := session.statement.RefTable.Indexes[uqeName]
+	sqlStr := session.engine.dialect.CreateIndexSQL(tableName, index)
+	_, err := session.exec(sqlStr)
+	return err
+}
+
+// Sync2 synchronizes structs to database tables
+func (session *Session) Sync2(beans ...interface{}) error {
+	engine :=
session.engine + + if session.isAutoClose { + session.isAutoClose = false + defer session.Close() + } + + tables, err := engine.dialect.GetTables(session.getQueryer(), session.ctx) + if err != nil { + return err + } + + session.autoResetStatement = false + defer func() { + session.autoResetStatement = true + session.resetStatement() + }() + + for _, bean := range beans { + v := utils.ReflectValue(bean) + table, err := engine.tagParser.ParseWithCache(v) + if err != nil { + return err + } + var tbName string + if len(session.statement.AltTableName) > 0 { + tbName = session.statement.AltTableName + } else { + tbName = engine.TableName(bean) + } + tbNameWithSchema := engine.tbNameWithSchema(tbName) + + var oriTable *schemas.Table + for _, tb := range tables { + if strings.EqualFold(engine.tbNameWithSchema(tb.Name), engine.tbNameWithSchema(tbName)) { + oriTable = tb + break + } + } + + // this is a new table + if oriTable == nil { + err = session.StoreEngine(session.statement.StoreEngine).createTable(bean) + if err != nil { + return err + } + + err = session.createUniques(bean) + if err != nil { + return err + } + + err = session.createIndexes(bean) + if err != nil { + return err + } + continue + } + + // this will modify an old table + if err = engine.loadTableInfo(oriTable); err != nil { + return err + } + + // check columns + for _, col := range table.Columns() { + var oriCol *schemas.Column + for _, col2 := range oriTable.Columns() { + if strings.EqualFold(col.Name, col2.Name) { + oriCol = col2 + break + } + } + + // column is not exist on table + if oriCol == nil { + session.statement.RefTable = table + session.statement.SetTableName(tbNameWithSchema) + if err = session.addColumn(col.Name); err != nil { + return err + } + continue + } + + err = nil + expectedType := engine.dialect.SQLType(col) + curType := engine.dialect.SQLType(oriCol) + if expectedType != curType { + if expectedType == schemas.Text && + strings.HasPrefix(curType, schemas.Varchar) { + // currently only support mysql & postgres + if engine.dialect.URI().DBType == schemas.MYSQL || + engine.dialect.URI().DBType == schemas.POSTGRES { + engine.logger.Infof("Table %s column %s change type from %s to %s\n", + tbNameWithSchema, col.Name, curType, expectedType) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) + } else { + engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s\n", + tbNameWithSchema, col.Name, curType, expectedType) + } + } else if strings.HasPrefix(curType, schemas.Varchar) && strings.HasPrefix(expectedType, schemas.Varchar) { + if engine.dialect.URI().DBType == schemas.MYSQL { + if oriCol.Length < col.Length { + engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", + tbNameWithSchema, col.Name, oriCol.Length, col.Length) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) + } + } + } else { + if !(strings.HasPrefix(curType, expectedType) && curType[len(expectedType)] == '(') { + engine.logger.Warnf("Table %s column %s db type is %s, struct type is %s", + tbNameWithSchema, col.Name, curType, expectedType) + } + } + } else if expectedType == schemas.Varchar { + if engine.dialect.URI().DBType == schemas.MYSQL { + if oriCol.Length < col.Length { + engine.logger.Infof("Table %s column %s change type from varchar(%d) to varchar(%d)\n", + tbNameWithSchema, col.Name, oriCol.Length, col.Length) + _, err = session.exec(engine.dialect.ModifyColumnSQL(tbNameWithSchema, col)) + } + } + } + + if col.Default != 
oriCol.Default { + switch { + case col.IsAutoIncrement: // For autoincrement column, don't check default + case (col.SQLType.Name == schemas.Bool || col.SQLType.Name == schemas.Boolean) && + ((strings.EqualFold(col.Default, "true") && oriCol.Default == "1") || + (strings.EqualFold(col.Default, "false") && oriCol.Default == "0")): + default: + engine.logger.Warnf("Table %s Column %s db default is %s, struct default is %s", + tbName, col.Name, oriCol.Default, col.Default) + } + } + if col.Nullable != oriCol.Nullable { + engine.logger.Warnf("Table %s Column %s db nullable is %v, struct nullable is %v", + tbName, col.Name, oriCol.Nullable, col.Nullable) + } + + if err != nil { + return err + } + } + + var foundIndexNames = make(map[string]bool) + var addedNames = make(map[string]*schemas.Index) + + for name, index := range table.Indexes { + var oriIndex *schemas.Index + for name2, index2 := range oriTable.Indexes { + if index.Equal(index2) { + oriIndex = index2 + foundIndexNames[name2] = true + break + } + } + + if oriIndex != nil { + if oriIndex.Type != index.Type { + sql := engine.dialect.DropIndexSQL(tbNameWithSchema, oriIndex) + _, err = session.exec(sql) + if err != nil { + return err + } + oriIndex = nil + } + } + + if oriIndex == nil { + addedNames[name] = index + } + } + + for name2, index2 := range oriTable.Indexes { + if _, ok := foundIndexNames[name2]; !ok { + sql := engine.dialect.DropIndexSQL(tbNameWithSchema, index2) + _, err = session.exec(sql) + if err != nil { + return err + } + } + } + + for name, index := range addedNames { + if index.Type == schemas.UniqueType { + session.statement.RefTable = table + session.statement.SetTableName(tbNameWithSchema) + err = session.addUnique(tbNameWithSchema, name) + } else if index.Type == schemas.IndexType { + session.statement.RefTable = table + session.statement.SetTableName(tbNameWithSchema) + err = session.addIndex(tbNameWithSchema, name) + } + if err != nil { + return err + } + } + + // check all the columns which removed from struct fields but left on database tables. + for _, colName := range oriTable.ColumnsSeq() { + if table.GetColumn(colName) == nil { + engine.logger.Warnf("Table %s has column %s but struct has not related field", engine.TableName(oriTable.Name, true), colName) + } + } + } + + return nil +} + +// ImportFile SQL DDL file +func (session *Session) ImportFile(ddlPath string) ([]sql.Result, error) { + file, err := os.Open(ddlPath) + if err != nil { + return nil, err + } + defer file.Close() + return session.Import(file) +} + +// Import SQL DDL from io.Reader +func (session *Session) Import(r io.Reader) ([]sql.Result, error) { + var results []sql.Result + var lastError error + scanner := bufio.NewScanner(r) + + var inSingleQuote bool + semiColSpliter := func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + for i, b := range data { + if b == '\'' { + inSingleQuote = !inSingleQuote + } + if !inSingleQuote && b == ';' { + return i + 1, data[0:i], nil + } + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. 
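+		// (Returning 0, nil, nil asks bufio.Scanner to read more input before
+		// invoking this split function again.)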
+ return 0, nil, nil + } + + scanner.Split(semiColSpliter) + + for scanner.Scan() { + query := strings.Trim(scanner.Text(), " \t\n\r") + if len(query) > 0 { + result, err := session.Exec(query) + results = append(results, result) + if err != nil { + return nil, err + } + } + } + + return results, lastError +} diff --git a/vendor/xorm.io/xorm/session_stats.go b/vendor/xorm.io/xorm/session_stats.go new file mode 100644 index 0000000000..17d0a675ae --- /dev/null +++ b/vendor/xorm.io/xorm/session_stats.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xorm + +import ( + "database/sql" + "errors" + "reflect" +) + +// Count counts the records. bean's non-empty fields +// are conditions. +func (session *Session) Count(bean ...interface{}) (int64, error) { + if session.isAutoClose { + defer session.Close() + } + + sqlStr, args, err := session.statement.GenCountSQL(bean...) + if err != nil { + return 0, err + } + + var total int64 + err = session.queryRow(sqlStr, args...).Scan(&total) + if err == sql.ErrNoRows || err == nil { + return total, nil + } + + return 0, err +} + +// sum call sum some column. bean's non-empty fields are conditions. +func (session *Session) sum(res interface{}, bean interface{}, columnNames ...string) error { + if session.isAutoClose { + defer session.Close() + } + + v := reflect.ValueOf(res) + if v.Kind() != reflect.Ptr { + return errors.New("need a pointer to a variable") + } + + sqlStr, args, err := session.statement.GenSumSQL(bean, columnNames...) + if err != nil { + return err + } + + if v.Elem().Kind() == reflect.Slice { + err = session.queryRow(sqlStr, args...).ScanSlice(res) + } else { + err = session.queryRow(sqlStr, args...).Scan(res) + } + if err == sql.ErrNoRows || err == nil { + return nil + } + return err +} + +// Sum call sum some column. bean's non-empty fields are conditions. +func (session *Session) Sum(bean interface{}, columnName string) (res float64, err error) { + return res, session.sum(&res, bean, columnName) +} + +// SumInt call sum some column. bean's non-empty fields are conditions. +func (session *Session) SumInt(bean interface{}, columnName string) (res int64, err error) { + return res, session.sum(&res, bean, columnName) +} + +// Sums call sum some columns. bean's non-empty fields are conditions. +func (session *Session) Sums(bean interface{}, columnNames ...string) ([]float64, error) { + var res = make([]float64, len(columnNames), len(columnNames)) + return res, session.sum(&res, bean, columnNames...) +} + +// SumsInt sum specify columns and return as []int64 instead of []float64 +func (session *Session) SumsInt(bean interface{}, columnNames ...string) ([]int64, error) { + var res = make([]int64, len(columnNames), len(columnNames)) + return res, session.sum(&res, bean, columnNames...) +} diff --git a/vendor/xorm.io/xorm/session_tx.go b/vendor/xorm.io/xorm/session_tx.go new file mode 100644 index 0000000000..cd23cf89c1 --- /dev/null +++ b/vendor/xorm.io/xorm/session_tx.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
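+
+// Editor's illustration (not upstream xorm code): the usual transaction pattern
+// with this API, assuming an initialized *xorm.Engine named "engine" and a
+// mapped struct value "user":
+//
+//	sess := engine.NewSession()
+//	defer sess.Close()
+//	if err := sess.Begin(); err != nil {
+//		return err
+//	}
+//	if _, err := sess.Insert(&user); err != nil {
+//		sess.Rollback() // best-effort; Commit below is skipped
+//		return err
+//	}
+//	return sess.Commit()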
+ +package xorm + +import ( + "time" + + "xorm.io/xorm/log" +) + +// Begin a transaction +func (session *Session) Begin() error { + if session.isAutoCommit { + tx, err := session.DB().BeginTx(session.ctx, nil) + if err != nil { + return err + } + session.isAutoCommit = false + session.isCommitedOrRollbacked = false + session.tx = tx + + session.saveLastSQL("BEGIN TRANSACTION") + } + return nil +} + +// Rollback When using transaction, you can rollback if any error +func (session *Session) Rollback() error { + if !session.isAutoCommit && !session.isCommitedOrRollbacked { + session.saveLastSQL("ROLL BACK") + session.isCommitedOrRollbacked = true + session.isAutoCommit = true + + start := time.Now() + needSQL := session.DB().NeedLogSQL(session.ctx) + if needSQL { + session.engine.logger.BeforeSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "ROLL BACK", + }) + } + err := session.tx.Rollback() + if needSQL { + session.engine.logger.AfterSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "ROLL BACK", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + return err + } + return nil +} + +// Commit When using transaction, Commit will commit all operations. +func (session *Session) Commit() error { + if !session.isAutoCommit && !session.isCommitedOrRollbacked { + session.saveLastSQL("COMMIT") + session.isCommitedOrRollbacked = true + session.isAutoCommit = true + + start := time.Now() + needSQL := session.DB().NeedLogSQL(session.ctx) + if needSQL { + session.engine.logger.BeforeSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "COMMIT", + }) + } + err := session.tx.Commit() + if needSQL { + session.engine.logger.AfterSQL(log.LogContext{ + Ctx: session.ctx, + SQL: "COMMIT", + ExecuteTime: time.Now().Sub(start), + Err: err, + }) + } + + if err != nil { + return err + } + + // handle processors after tx committed + closureCallFunc := func(closuresPtr *[]func(interface{}), bean interface{}) { + if closuresPtr != nil { + for _, closure := range *closuresPtr { + closure(bean) + } + } + } + + for bean, closuresPtr := range session.afterInsertBeans { + closureCallFunc(closuresPtr, bean) + + if processor, ok := interface{}(bean).(AfterInsertProcessor); ok { + processor.AfterInsert() + } + } + for bean, closuresPtr := range session.afterUpdateBeans { + closureCallFunc(closuresPtr, bean) + + if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok { + processor.AfterUpdate() + } + } + for bean, closuresPtr := range session.afterDeleteBeans { + closureCallFunc(closuresPtr, bean) + + if processor, ok := interface{}(bean).(AfterDeleteProcessor); ok { + processor.AfterDelete() + } + } + cleanUpFunc := func(slices *map[interface{}]*[]func(interface{})) { + if len(*slices) > 0 { + *slices = make(map[interface{}]*[]func(interface{}), 0) + } + } + cleanUpFunc(&session.afterInsertBeans) + cleanUpFunc(&session.afterUpdateBeans) + cleanUpFunc(&session.afterDeleteBeans) + } + return nil +} diff --git a/vendor/xorm.io/xorm/session_update.go b/vendor/xorm.io/xorm/session_update.go new file mode 100644 index 0000000000..7df8c75253 --- /dev/null +++ b/vendor/xorm.io/xorm/session_update.go @@ -0,0 +1,532 @@ +// Copyright 2016 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
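+
+// Editor's illustration (not upstream xorm code): typical Update calls, assuming
+// an initialized *xorm.Engine named "engine" and a mapped struct User:
+//
+//	// struct form: non-empty fields become SET clauses for the row with id 1
+//	affected, err := engine.ID(1).Update(&User{Name: "bob"})
+//	// map form: columns are set explicitly; Table() is needed since a map
+//	// carries no table information
+//	affected, err = engine.Table(new(User)).ID(1).Update(map[string]interface{}{"name": "bob"})
+//	_, _ = affected, err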
+ +package xorm + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "xorm.io/builder" + "xorm.io/xorm/caches" + "xorm.io/xorm/internal/utils" + "xorm.io/xorm/schemas" +) + +func (session *Session) cacheUpdate(table *schemas.Table, tableName, sqlStr string, args ...interface{}) error { + if table == nil || + session.tx != nil { + return ErrCacheFailed + } + + oldhead, newsql := session.statement.ConvertUpdateSQL(sqlStr) + if newsql == "" { + return ErrCacheFailed + } + for _, filter := range session.engine.dialect.Filters() { + newsql = filter.Do(newsql) + } + session.engine.logger.Debugf("[cache] new sql: %v, %v", oldhead, newsql) + + var nStart int + if len(args) > 0 { + if strings.Index(sqlStr, "?") > -1 { + nStart = strings.Count(oldhead, "?") + } else { + // only for pq, TODO: if any other databse? + nStart = strings.Count(oldhead, "$") + } + } + + cacher := session.engine.GetCacher(tableName) + session.engine.logger.Debugf("[cache] get cache sql: %v, %v", newsql, args[nStart:]) + ids, err := caches.GetCacheSql(cacher, tableName, newsql, args[nStart:]) + if err != nil { + rows, err := session.NoCache().queryRows(newsql, args[nStart:]...) + if err != nil { + return err + } + defer rows.Close() + + ids = make([]schemas.PK, 0) + for rows.Next() { + var res = make([]string, len(table.PrimaryKeys)) + err = rows.ScanSlice(&res) + if err != nil { + return err + } + var pk schemas.PK = make([]interface{}, len(table.PrimaryKeys)) + for i, col := range table.PKColumns() { + if col.SQLType.IsNumeric() { + n, err := strconv.ParseInt(res[i], 10, 64) + if err != nil { + return err + } + pk[i] = n + } else if col.SQLType.IsText() { + pk[i] = res[i] + } else { + return errors.New("not supported") + } + } + + ids = append(ids, pk) + } + session.engine.logger.Debugf("[cache] find updated id: %v", ids) + } /*else { + session.engine.LogDebug("[xorm:cacheUpdate] del cached sql:", tableName, newsql, args) + cacher.DelIds(tableName, genSqlKey(newsql, args)) + }*/ + + for _, id := range ids { + sid, err := id.ToString() + if err != nil { + return err + } + if bean := cacher.GetBean(tableName, sid); bean != nil { + sqls := utils.SplitNNoCase(sqlStr, "where", 2) + if len(sqls) == 0 || len(sqls) > 2 { + return ErrCacheFailed + } + + sqls = utils.SplitNNoCase(sqls[0], "set", 2) + if len(sqls) != 2 { + return ErrCacheFailed + } + kvs := strings.Split(strings.TrimSpace(sqls[1]), ",") + + for idx, kv := range kvs { + sps := strings.SplitN(kv, "=", 2) + sps2 := strings.Split(sps[0], ".") + colName := sps2[len(sps2)-1] + colName = session.engine.dialect.Quoter().Trim(colName) + colName = schemas.CommonQuoter.Trim(colName) + + if col := table.GetColumn(colName); col != nil { + fieldValue, err := col.ValueOf(bean) + if err != nil { + session.engine.logger.Errorf("%v", err) + } else { + session.engine.logger.Debugf("[cache] set bean field: %v, %v, %v", bean, colName, fieldValue.Interface()) + if col.IsVersion && session.statement.CheckVersion { + session.incrVersionFieldValue(fieldValue) + } else { + fieldValue.Set(reflect.ValueOf(args[idx])) + } + } + } else { + session.engine.logger.Errorf("[cache] ERROR: column %v is not table %v's", + colName, table.Name) + } + } + + session.engine.logger.Debugf("[cache] update cache: %v, %v, %v", tableName, id, bean) + cacher.PutBean(tableName, sid, bean) + } + } + session.engine.logger.Debugf("[cache] clear cached table sql: %v", tableName) + cacher.ClearIds(tableName) + return nil +} + +// Update records, bean's non-empty fields are updated contents, +// 
condiBean's non-empty fields are used as conditions.
+// CAUTION:
+// 1. bool fields are treated as updated content by default, not as conditions;
+// you should call UseBool if you need a bool field used as a condition.
+// 2. float32 & float64 comparisons may be inexact when used as conditions.
+func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int64, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	if session.statement.LastError != nil {
+		return 0, session.statement.LastError
+	}
+
+	v := utils.ReflectValue(bean)
+	t := v.Type()
+
+	var colNames []string
+	var args []interface{}
+
+	// handle before update processors
+	for _, closure := range session.beforeClosures {
+		closure(bean)
+	}
+	cleanupProcessorsClosures(&session.beforeClosures) // clean up after use
+	if processor, ok := interface{}(bean).(BeforeUpdateProcessor); ok {
+		processor.BeforeUpdate()
+	}
+	// --
+
+	var err error
+	var isMap = t.Kind() == reflect.Map
+	var isStruct = t.Kind() == reflect.Struct
+	if isStruct {
+		if err := session.statement.SetRefBean(bean); err != nil {
+			return 0, err
+		}
+
+		if len(session.statement.TableName()) <= 0 {
+			return 0, ErrTableNotFound
+		}
+
+		if session.statement.ColumnStr() == "" {
+			colNames, args, err = session.statement.BuildUpdates(v, false, false,
+				false, false, true)
+		} else {
+			colNames, args, err = session.genUpdateColumns(bean)
+		}
+		if err != nil {
+			return 0, err
+		}
+	} else if isMap {
+		colNames = make([]string, 0)
+		args = make([]interface{}, 0)
+		bValue := reflect.Indirect(reflect.ValueOf(bean))
+
+		for _, v := range bValue.MapKeys() {
+			colNames = append(colNames, session.engine.Quote(v.String())+" = ?")
+			args = append(args, bValue.MapIndex(v).Interface())
+		}
+	} else {
+		return 0, ErrParamsType
+	}
+
+	table := session.statement.RefTable
+
+	if session.statement.UseAutoTime && table != nil && table.Updated != "" {
+		if !session.statement.ColumnMap.Contain(table.Updated) &&
+			!session.statement.OmitColumnMap.Contain(table.Updated) {
+			colNames = append(colNames, session.engine.Quote(table.Updated)+" = ?")
+			col := table.UpdatedColumn()
+			val, t := session.engine.nowTime(col)
+			if session.engine.dialect.URI().DBType == schemas.ORACLE {
+				args = append(args, t)
+			} else {
+				args = append(args, val)
+			}
+
+			var colName = col.Name
+			if isStruct {
+				session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+					col := table.GetColumn(colName)
+					setColumnTime(bean, col, t)
+				})
+			}
+		}
+	}
+
+	// for update actions like "column = column + ?"
+	incColumns := session.statement.IncrColumns
+	for i, colName := range incColumns.ColNames {
+		colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" + ?")
+		args = append(args, incColumns.Args[i])
+	}
+	// for update actions like "column = column - ?"
+	// for update actions like "column = column + ?"
+	incColumns := session.statement.IncrColumns
+	for i, colName := range incColumns.ColNames {
+		colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" + ?")
+		args = append(args, incColumns.Args[i])
+	}
+	// for update actions like "column = column - ?"
+	decColumns := session.statement.DecrColumns
+	for i, colName := range decColumns.ColNames {
+		colNames = append(colNames, session.engine.Quote(colName)+" = "+session.engine.Quote(colName)+" - ?")
+		args = append(args, decColumns.Args[i])
+	}
+	// for update actions like "column = expression"
+	exprColumns := session.statement.ExprColumns
+	for i, colName := range exprColumns.ColNames {
+		switch tp := exprColumns.Args[i].(type) {
+		case string:
+			if len(tp) == 0 {
+				tp = "''"
+			}
+			colNames = append(colNames, session.engine.Quote(colName)+"="+tp)
+		case *builder.Builder:
+			subQuery, subArgs, err := session.statement.GenCondSQL(tp)
+			if err != nil {
+				return 0, err
+			}
+			colNames = append(colNames, session.engine.Quote(colName)+"=("+subQuery+")")
+			args = append(args, subArgs...)
+		default:
+			colNames = append(colNames, session.engine.Quote(colName)+"=?")
+			args = append(args, exprColumns.Args[i])
+		}
+	}
+
+	if err = session.statement.ProcessIDParam(); err != nil {
+		return 0, err
+	}
+
+	var autoCond builder.Cond
+	if !session.statement.NoAutoCondition {
+		condBeanIsStruct := false
+		if len(condiBean) > 0 {
+			if c, ok := condiBean[0].(map[string]interface{}); ok {
+				autoCond = builder.Eq(c)
+			} else {
+				ct := reflect.TypeOf(condiBean[0])
+				k := ct.Kind()
+				if k == reflect.Ptr {
+					k = ct.Elem().Kind()
+				}
+				if k == reflect.Struct {
+					var err error
+					autoCond, err = session.statement.BuildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false)
+					if err != nil {
+						return 0, err
+					}
+					condBeanIsStruct = true
+				} else {
+					return 0, ErrConditionType
+				}
+			}
+		}
+
+		if !condBeanIsStruct && table != nil {
+			if col := table.DeletedColumn(); col != nil && !session.statement.GetUnscoped() { // tag "deleted" is enabled
+				autoCond1 := session.statement.CondDeleted(col)
+
+				if autoCond == nil {
+					autoCond = autoCond1
+				} else {
+					autoCond = autoCond.And(autoCond1)
+				}
+			}
+		}
+	}
+
+	st := session.statement
+
+	var (
+		sqlStr   string
+		condArgs []interface{}
+		condSQL  string
+		cond     = session.statement.Conds().And(autoCond)
+
+		doIncVer = isStruct && (table != nil && table.Version != "" && session.statement.CheckVersion)
+		verValue *reflect.Value
+	)
+	if doIncVer {
+		verValue, err = table.VersionColumn().ValueOf(bean)
+		if err != nil {
+			return 0, err
+		}
+
+		if verValue != nil {
+			cond = cond.And(builder.Eq{session.engine.Quote(table.Version): verValue.Interface()})
+			colNames = append(colNames, session.engine.Quote(table.Version)+" = "+session.engine.Quote(table.Version)+" + 1")
+		}
+	}
+
+	if len(colNames) <= 0 {
+		return 0, errors.New("No content found to be updated")
+	}
+
+	condSQL, condArgs, err = session.statement.GenCondSQL(cond)
+	if err != nil {
+		return 0, err
+	}
+
+	if len(condSQL) > 0 {
+		condSQL = "WHERE " + condSQL
+	}
+
+	if st.OrderStr != "" {
+		condSQL = condSQL + fmt.Sprintf(" ORDER BY %v", st.OrderStr)
+	}
+
+	var tableName = session.statement.TableName()
+	// TODO: Oracle support needed
+	var top string
+	if st.LimitN != nil {
+		limitValue := *st.LimitN
+		switch session.engine.dialect.URI().DBType {
+		case schemas.MYSQL:
+			condSQL = condSQL + fmt.Sprintf(" LIMIT %d", limitValue)
+		case schemas.SQLITE:
+			tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", limitValue)
+			cond = cond.And(builder.Expr(fmt.Sprintf("rowid IN (SELECT rowid FROM %v %v)",
+				session.engine.Quote(tableName), tempCondSQL), condArgs...))
+			condSQL, condArgs, err = session.statement.GenCondSQL(cond)
+			if err != nil {
+				return 0, err
+			}
+			if len(condSQL) > 0 {
+				condSQL = "WHERE " + condSQL
+			}
+		case schemas.POSTGRES:
+			tempCondSQL := condSQL + fmt.Sprintf(" LIMIT %d", limitValue)
+			cond = cond.And(builder.Expr(fmt.Sprintf("CTID IN (SELECT CTID FROM %v %v)",
+				session.engine.Quote(tableName), tempCondSQL), condArgs...))
+			condSQL, condArgs, err = session.statement.GenCondSQL(cond)
+			if err != nil {
+				return 0, err
+			}
+
+			if len(condSQL) > 0 {
+				condSQL = "WHERE " + condSQL
+			}
+		case schemas.MSSQL:
+			if st.OrderStr != "" && table != nil && len(table.PrimaryKeys) == 1 {
+				cond = builder.Expr(fmt.Sprintf("%s IN (SELECT TOP (%d) %s FROM %v%v)",
+					table.PrimaryKeys[0], limitValue, table.PrimaryKeys[0],
+					session.engine.Quote(tableName), condSQL), condArgs...)
+
+				condSQL, condArgs, err = session.statement.GenCondSQL(cond)
+				if err != nil {
+					return 0, err
+				}
+				if len(condSQL) > 0 {
+					condSQL = "WHERE " + condSQL
+				}
+			} else {
+				top = fmt.Sprintf("TOP (%d) ", limitValue)
+			}
+		}
+	}
+
+	var tableAlias = session.engine.Quote(tableName)
+	var fromSQL string
+	if session.statement.TableAlias != "" {
+		switch session.engine.dialect.URI().DBType {
+		case schemas.MSSQL:
+			fromSQL = fmt.Sprintf("FROM %s %s ", tableAlias, session.statement.TableAlias)
+			tableAlias = session.statement.TableAlias
+		default:
+			tableAlias = fmt.Sprintf("%s AS %s", tableAlias, session.statement.TableAlias)
+		}
+	}
+
+	sqlStr = fmt.Sprintf("UPDATE %v%v SET %v %v%v",
+		top,
+		tableAlias,
+		strings.Join(colNames, ", "),
+		fromSQL,
+		condSQL)
+
+	res, err := session.exec(sqlStr, append(args, condArgs...)...)
+	if err != nil {
+		return 0, err
+	} else if doIncVer {
+		if verValue != nil && verValue.IsValid() && verValue.CanSet() {
+			session.incrVersionFieldValue(verValue)
+		}
+	}
+
+	if cacher := session.engine.GetCacher(tableName); cacher != nil && session.statement.UseCache {
+		// session.cacheUpdate(table, tableName, sqlStr, args...)
+		session.engine.logger.Debugf("[cache] clear table: %v", tableName)
+		cacher.ClearIds(tableName)
+		cacher.ClearBeans(tableName)
+	}
+
+	// handle after update processors
+	if session.isAutoCommit {
+		for _, closure := range session.afterClosures {
+			closure(bean)
+		}
+		if processor, ok := interface{}(bean).(AfterUpdateProcessor); ok {
+			session.engine.logger.Debugf("[event] %v has after update processor", tableName)
+			processor.AfterUpdate()
+		}
+	} else {
+		lenAfterClosures := len(session.afterClosures)
+		if lenAfterClosures > 0 {
+			if value, has := session.afterUpdateBeans[bean]; has && value != nil {
+				*value = append(*value, session.afterClosures...)
+			} else {
+				afterClosures := make([]func(interface{}), lenAfterClosures)
+				copy(afterClosures, session.afterClosures)
+				// FIXME: if bean is a map type, this will panic because a map cannot be used as a map key
+				session.afterUpdateBeans[bean] = &afterClosures
+			}
+
+		} else {
+			if _, ok := interface{}(bean).(AfterUpdateProcessor); ok {
+				session.afterUpdateBeans[bean] = nil
+			}
+		}
+	}
+	cleanupProcessorsClosures(&session.afterClosures) // cleanup after use
+	// --
+
+	return res.RowsAffected()
+}
+
+func (session *Session) genUpdateColumns(bean interface{}) ([]string, []interface{}, error) {
+	table := session.statement.RefTable
+	colNames := make([]string, 0, len(table.ColumnsSeq()))
+	args := make([]interface{}, 0, len(table.ColumnsSeq()))
+
+	for _, col := range table.Columns() {
+		if !col.IsVersion && !col.IsCreated && !col.IsUpdated {
+			if session.statement.OmitColumnMap.Contain(col.Name) {
+				continue
+			}
+		}
+		if col.MapType == schemas.ONLYFROMDB {
+			continue
+		}
+
+		fieldValuePtr, err := col.ValueOf(bean)
+		if err != nil {
+			return nil, nil, err
+		}
+		fieldValue := *fieldValuePtr
+
+		if col.IsAutoIncrement && utils.IsValueZero(fieldValue) {
+			continue
+		}
+
+		if (col.IsDeleted && !session.statement.GetUnscoped()) || col.IsCreated {
+			continue
+		}
+
+		// if only updating specified columns
+		if len(session.statement.ColumnMap) > 0 && !session.statement.ColumnMap.Contain(col.Name) {
+			continue
+		}
+
+		if session.statement.IncrColumns.IsColExist(col.Name) {
+			continue
+		} else if session.statement.DecrColumns.IsColExist(col.Name) {
+			continue
+		} else if session.statement.ExprColumns.IsColExist(col.Name) {
+			continue
+		}
+
+		// !evalphobia! set fieldValue to nil when the column is nullable and zero-valued
+		if _, ok := getFlagForColumn(session.statement.NullableMap, col); ok {
+			if col.Nullable && utils.IsValueZero(fieldValue) {
+				var nilValue *int
+				fieldValue = reflect.ValueOf(nilValue)
+			}
+		}
+
+		if col.IsUpdated && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ {
+			// if time is non-empty, then set to auto time
+			val, t := session.engine.nowTime(col)
+			args = append(args, val)
+
+			var colName = col.Name
+			session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+				col := table.GetColumn(colName)
+				setColumnTime(bean, col, t)
+			})
+		} else if col.IsVersion && session.statement.CheckVersion {
+			args = append(args, 1)
+		} else {
+			arg, err := session.statement.Value2Interface(col, fieldValue)
+			if err != nil {
+				return colNames, args, err
+			}
+			args = append(args, arg)
+		}
+
+		colNames = append(colNames, session.engine.Quote(col.Name)+" = ?")
+	}
+	return colNames, args, nil
+}
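Seen from the caller's side, the column-selection rules in genUpdateColumns (the omit map, zero-value skipping, the Cols list, the nullable map) work roughly as below. A minimal sketch against the public xorm session API; the User struct is hypothetical:

	type User struct {
		Id   int64
		Name string
		Age  int
	}

	// Only "name" is written; zero-valued fields are skipped by default anyway.
	affected, err := engine.ID(1).Cols("name").Update(&User{Name: "bob"})

	// Nullable forces "age" to NULL even though 0 is Go's zero value for int.
	affected, err = engine.ID(1).Nullable("age").Update(&User{Age: 0})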
diff --git a/vendor/xorm.io/xorm/tags/parser.go b/vendor/xorm.io/xorm/tags/parser.go
new file mode 100644
index 0000000000..add30a1347
--- /dev/null
+++ b/vendor/xorm.io/xorm/tags/parser.go
@@ -0,0 +1,308 @@
+// Copyright 2020 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tags
+
+import (
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"xorm.io/xorm/caches"
+	"xorm.io/xorm/convert"
+	"xorm.io/xorm/dialects"
+	"xorm.io/xorm/names"
+	"xorm.io/xorm/schemas"
+)
+
+var (
+	ErrUnsupportedType = errors.New("Unsupported type")
+)
+
+type Parser struct {
+	identifier   string
+	dialect      dialects.Dialect
+	columnMapper names.Mapper
+	tableMapper  names.Mapper
+	handlers     map[string]Handler
+	cacherMgr    *caches.Manager
+	tableCache   sync.Map // map[reflect.Type]*schemas.Table
+}
+
+func NewParser(identifier string, dialect dialects.Dialect, tableMapper, columnMapper names.Mapper, cacherMgr *caches.Manager) *Parser {
+	return &Parser{
+		identifier:   identifier,
+		dialect:      dialect,
+		tableMapper:  tableMapper,
+		columnMapper: columnMapper,
+		handlers:     defaultTagHandlers,
+		cacherMgr:    cacherMgr,
+	}
+}
+
+func (parser *Parser) GetTableMapper() names.Mapper {
+	return parser.tableMapper
+}
+
+func (parser *Parser) SetTableMapper(mapper names.Mapper) {
+	parser.ClearCaches()
+	parser.tableMapper = mapper
+}
+
+func (parser *Parser) GetColumnMapper() names.Mapper {
+	return parser.columnMapper
+}
+
+func (parser *Parser) SetColumnMapper(mapper names.Mapper) {
+	parser.ClearCaches()
+	parser.columnMapper = mapper
+}
+
+func (parser *Parser) ParseWithCache(v reflect.Value) (*schemas.Table, error) {
+	t := v.Type()
+	tableI, ok := parser.tableCache.Load(t)
+	if ok {
+		return tableI.(*schemas.Table), nil
+	}
+
+	table, err := parser.Parse(v)
+	if err != nil {
+		return nil, err
+	}
+
+	parser.tableCache.Store(t, table)
+
+	if parser.cacherMgr.GetDefaultCacher() != nil {
+		if v.CanAddr() {
+			gob.Register(v.Addr().Interface())
+		} else {
+			gob.Register(v.Interface())
+		}
+	}
+
+	return table, nil
+}
+
+// ClearCacheTable removes the database mapper of a type from the cache
+func (parser *Parser) ClearCacheTable(t reflect.Type) {
+	parser.tableCache.Delete(t)
+}
+
+// ClearCaches removes all the cached table information parsed from structs
+func (parser *Parser) ClearCaches() {
+	parser.tableCache = sync.Map{}
+}
+
+func addIndex(indexName string, table *schemas.Table, col *schemas.Column, indexType int) {
+	if index, ok := table.Indexes[indexName]; ok {
+		index.AddColumn(col.Name)
+		col.Indexes[index.Name] = indexType
+	} else {
+		index := schemas.NewIndex(indexName, indexType)
+		index.AddColumn(col.Name)
+		table.AddIndex(index)
+		col.Indexes[index.Name] = indexType
+	}
+}
+
+// Parse parses a struct into its table information
+func (parser *Parser) Parse(v reflect.Value) (*schemas.Table, error) {
+	t := v.Type()
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+		v = v.Elem()
+	}
+	if t.Kind() != reflect.Struct {
+		return nil, ErrUnsupportedType
+	}
+
+	table := schemas.NewEmptyTable()
+	table.Type = t
+	table.Name = names.GetTableName(parser.tableMapper, v)
+
+	var idFieldColName string
+	var hasCacheTag, hasNoCacheTag bool
+
+	for i := 0; i < t.NumField(); i++ {
+		tag := t.Field(i).Tag
+
+		ormTagStr := tag.Get(parser.identifier)
+		var col *schemas.Column
+		fieldValue := v.Field(i)
+		fieldType := fieldValue.Type()
+
+		if ormTagStr != "" {
+			col = &schemas.Column{
+				FieldName:       t.Field(i).Name,
+				Nullable:        true,
+				IsPrimaryKey:    false,
+				IsAutoIncrement: false,
+				MapType:         schemas.TWOSIDES,
+				Indexes:         make(map[string]int),
+				DefaultIsEmpty:  true,
+			}
+			tags := splitTag(ormTagStr)
+
+			if len(tags) > 0 {
+				if tags[0] == "-" {
+					continue
+				}
+
+				var ctx = Context{
+					table:      table,
+					col:        col,
+					fieldValue: fieldValue,
+					indexNames: make(map[string]int),
+					parser:     parser,
+				}
+
+				if strings.HasPrefix(strings.ToUpper(tags[0]), "EXTENDS") {
+					pStart := strings.Index(tags[0], "(")
+					if pStart > -1 && strings.HasSuffix(tags[0], ")") {
+						var tagPrefix = strings.TrimFunc(tags[0][pStart+1:len(tags[0])-1], func(r rune) bool {
+							return r == '\'' || r == '"'
+						})
+
+						ctx.params = []string{tagPrefix}
+					}
+
+					if err := ExtendsTagHandler(&ctx); err != nil {
+						return nil, err
+					}
+					continue
+				}
+
+				for j, key := range tags {
+					if ctx.ignoreNext {
+						ctx.ignoreNext = false
+						continue
+					}
+
+					k := strings.ToUpper(key)
+					ctx.tagName = k
+					ctx.params = []string{}
+
+					pStart := strings.Index(k, "(")
+					if pStart == 0 {
+						return nil, errors.New("'(' could not be the first character")
+					}
+					if pStart > -1 {
+						if !strings.HasSuffix(k, ")") {
+							return nil, fmt.Errorf("field %s tag %s has no matching ')'", col.FieldName, key)
+						}
+
+						ctx.tagName = k[:pStart]
+						ctx.params = strings.Split(key[pStart+1:len(k)-1], ",")
+					}
+
+					if j > 0 {
+						ctx.preTag = strings.ToUpper(tags[j-1])
+					}
+					if j < len(tags)-1 {
+						ctx.nextTag = tags[j+1]
+					} else {
+						ctx.nextTag = ""
+					}
+
+					if h, ok := parser.handlers[ctx.tagName]; ok {
+						if err := h(&ctx); err != nil {
+							return nil, err
+						}
+					} else {
+						if strings.HasPrefix(key, "'") && strings.HasSuffix(key, "'") {
+							col.Name = key[1 : len(key)-1]
+						} else {
+							col.Name = key
+						}
+					}
+
+					if ctx.hasCacheTag {
+						hasCacheTag = true
+					}
+					if ctx.hasNoCacheTag {
+						hasNoCacheTag = true
+					}
+				}
+
+				if col.SQLType.Name == "" {
+					col.SQLType = schemas.Type2SQLType(fieldType)
+				}
+				parser.dialect.SQLType(col)
+				if col.Length == 0 {
+					col.Length = col.SQLType.DefaultLength
+				}
+				if col.Length2 == 0 {
+					col.Length2 = col.SQLType.DefaultLength2
+				}
+				if col.Name == "" {
+					col.Name = parser.columnMapper.Obj2Table(t.Field(i).Name)
+				}
+
+				if ctx.isUnique {
+					ctx.indexNames[col.Name] = schemas.UniqueType
+				} else if ctx.isIndex {
+					ctx.indexNames[col.Name] = schemas.IndexType
+				}
+
+				for indexName, indexType := range ctx.indexNames {
+					addIndex(indexName, table, col, indexType)
+				}
+			}
+		} else {
+			var sqlType schemas.SQLType
+			if fieldValue.CanAddr() {
+				if _, ok := fieldValue.Addr().Interface().(convert.Conversion); ok {
+					sqlType = schemas.SQLType{Name: schemas.Text}
+				}
+			}
+			if _, ok := fieldValue.Interface().(convert.Conversion); ok {
+				sqlType = schemas.SQLType{Name: schemas.Text}
+			} else {
+				sqlType = schemas.Type2SQLType(fieldType)
+			}
+			col = schemas.NewColumn(parser.columnMapper.Obj2Table(t.Field(i).Name),
+				t.Field(i).Name, sqlType, sqlType.DefaultLength,
+				sqlType.DefaultLength2, true)
+
+			if fieldType.Kind() == reflect.Int64 && (strings.ToUpper(col.FieldName) == "ID" || strings.HasSuffix(strings.ToUpper(col.FieldName), ".ID")) {
+				idFieldColName = col.Name
+			}
+		}
+		if col.IsAutoIncrement {
+			col.Nullable = false
+		}
+
+		table.AddColumn(col)
+
+	} // end for
+
+	if idFieldColName != "" && len(table.PrimaryKeys) == 0 {
+		col := table.GetColumn(idFieldColName)
+		col.IsPrimaryKey = true
+		col.IsAutoIncrement = true
+		col.Nullable = false
+		table.PrimaryKeys = append(table.PrimaryKeys, col.Name)
+		table.AutoIncrement = col.Name
+	}
+
+	if hasCacheTag {
+		if parser.cacherMgr.GetDefaultCacher() != nil { // !nash! use engine's cacher if provided
+			//engine.logger.Info("enable cache on table:", table.Name)
+			parser.cacherMgr.SetCacher(table.Name, parser.cacherMgr.GetDefaultCacher())
+		} else {
+			//engine.logger.Info("enable LRU cache on table:", table.Name)
+			parser.cacherMgr.SetCacher(table.Name, caches.NewLRUCacher2(caches.NewMemoryStore(), time.Hour, 10000))
+		}
+	}
+	if hasNoCacheTag {
+		//engine.logger.Info("disable cache on table:", table.Name)
+		parser.cacherMgr.SetCacher(table.Name, nil)
+	}
+
+	return table, nil
+}
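For orientation, the tag grammar Parse consumes is a whitespace-separated list of keywords, each dispatched to the handlers registered in tag.go below. A hedged illustration (User is a hypothetical struct; keywords are matched case-insensitively):

	type User struct {
		Id   int64  // untagged int64 "Id" becomes the auto-increment primary key
		Name string `xorm:"varchar(64) notnull unique 'user_name'"`
		Skip string `xorm:"-"` // "-" ignores the field entirely
	}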
diff --git a/vendor/xorm.io/xorm/tags/tag.go b/vendor/xorm.io/xorm/tags/tag.go
new file mode 100644
index 0000000000..ee3f1e8240
--- /dev/null
+++ b/vendor/xorm.io/xorm/tags/tag.go
@@ -0,0 +1,332 @@
+// Copyright 2017 The Xorm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tags
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"xorm.io/xorm/schemas"
+)
+
+// splitTag splits a struct tag on spaces while keeping
+// single-quoted segments (such as 'user name') intact.
+func splitTag(tag string) (tags []string) {
+	tag = strings.TrimSpace(tag)
+	var hasQuote = false
+	var lastIdx = 0
+	for i, t := range tag {
+		if t == '\'' {
+			hasQuote = !hasQuote
+		} else if t == ' ' {
+			if lastIdx < i && !hasQuote {
+				tags = append(tags, strings.TrimSpace(tag[lastIdx:i]))
+				lastIdx = i + 1
+			}
+		}
+	}
+	if lastIdx < len(tag) {
+		tags = append(tags, strings.TrimSpace(tag[lastIdx:]))
+	}
+	return
+}
+
+// Context represents a context for xorm tag parsing.
+type Context struct {
+	tagName         string
+	params          []string
+	preTag, nextTag string
+	table           *schemas.Table
+	col             *schemas.Column
+	fieldValue      reflect.Value
+	isIndex         bool
+	isUnique        bool
+	indexNames      map[string]int
+	parser          *Parser
+	hasCacheTag     bool
+	hasNoCacheTag   bool
+	ignoreNext      bool
+}
+
+// Handler describes a tag handler for XORM
+type Handler func(ctx *Context) error
+
+var (
+	// defaultTagHandlers enumerates all the default tag handlers
+	defaultTagHandlers = map[string]Handler{
+		"<-":       OnlyFromDBTagHandler,
+		"->":       OnlyToDBTagHandler,
+		"PK":       PKTagHandler,
+		"NULL":     NULLTagHandler,
+		"NOT":      IgnoreTagHandler,
+		"AUTOINCR": AutoIncrTagHandler,
+		"DEFAULT":  DefaultTagHandler,
+		"CREATED":  CreatedTagHandler,
+		"UPDATED":  UpdatedTagHandler,
+		"DELETED":  DeletedTagHandler,
+		"VERSION":  VersionTagHandler,
+		"UTC":      UTCTagHandler,
+		"LOCAL":    LocalTagHandler,
+		"NOTNULL":  NotNullTagHandler,
+		"INDEX":    IndexTagHandler,
+		"UNIQUE":   UniqueTagHandler,
+		"CACHE":    CacheTagHandler,
+		"NOCACHE":  NoCacheTagHandler,
+		"COMMENT":  CommentTagHandler,
+	}
+)
+
+func init() {
+	for k := range schemas.SqlTypes {
+		defaultTagHandlers[k] = SQLTypeTagHandler
+	}
+}
+
+// IgnoreTagHandler describes the ignored tag handler
+func IgnoreTagHandler(ctx *Context) error {
+	return nil
+}
+
+// OnlyFromDBTagHandler describes a mapping direction tag handler
+func OnlyFromDBTagHandler(ctx *Context) error {
+	ctx.col.MapType = schemas.ONLYFROMDB
+	return nil
+}
+
+// OnlyToDBTagHandler describes a mapping direction tag handler
+func OnlyToDBTagHandler(ctx *Context) error {
+	ctx.col.MapType = schemas.ONLYTODB
+	return nil
+}
+
+// PKTagHandler describes the primary key tag handler
+func PKTagHandler(ctx *Context) error {
+	ctx.col.IsPrimaryKey = true
+	ctx.col.Nullable = false
+	return nil
+}
+
+// NULLTagHandler describes the null tag handler
+func NULLTagHandler(ctx *Context) error {
+	ctx.col.Nullable = (strings.ToUpper(ctx.preTag) != "NOT")
+	return nil
+}
+
+// NotNullTagHandler describes the notnull tag handler
+func NotNullTagHandler(ctx *Context) error {
+	ctx.col.Nullable = false
+	return nil
+}
+
+// AutoIncrTagHandler describes the autoincr tag handler
+func AutoIncrTagHandler(ctx *Context) error {
+	ctx.col.IsAutoIncrement = true
+	/*
+		if len(ctx.params) > 0 {
+			autoStartInt, err := strconv.Atoi(ctx.params[0])
+			if err != nil {
+				return err
+			}
+			ctx.col.AutoIncrStart = autoStartInt
+		} else {
+			ctx.col.AutoIncrStart = 1
+		}
+	*/
+	return nil
+}
+
+// DefaultTagHandler describes the default tag handler
+func DefaultTagHandler(ctx *Context) error {
+	if len(ctx.params) > 0 {
+		ctx.col.Default = ctx.params[0]
+	} else {
+		ctx.col.Default = ctx.nextTag
+		ctx.ignoreNext = true
+	}
+	ctx.col.DefaultIsEmpty = false
+	return nil
+}
+
+// CreatedTagHandler describes the created tag handler
+func CreatedTagHandler(ctx *Context) error {
+	ctx.col.IsCreated = true
+	return nil
+}
+
+// VersionTagHandler describes the version tag handler
+func VersionTagHandler(ctx *Context) error {
+	ctx.col.IsVersion = true
+	ctx.col.Default = "1"
+	return nil
+}
+
+// UTCTagHandler describes the utc tag handler
+func UTCTagHandler(ctx *Context) error {
+	ctx.col.TimeZone = time.UTC
+	return nil
+}
+
+// LocalTagHandler describes the local tag handler
+func LocalTagHandler(ctx *Context) error {
+	if len(ctx.params) == 0 {
+		ctx.col.TimeZone = time.Local
+	} else {
+		var err error
+		ctx.col.TimeZone, err = time.LoadLocation(ctx.params[0])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UpdatedTagHandler describes the updated tag handler
+func UpdatedTagHandler(ctx *Context) error {
+	ctx.col.IsUpdated = true
+	return nil
+}
+
+// DeletedTagHandler describes the deleted tag handler
+func DeletedTagHandler(ctx *Context) error {
+	ctx.col.IsDeleted = true
+	return nil
+}
+
+// IndexTagHandler describes the index tag handler
+func IndexTagHandler(ctx *Context) error {
+	if len(ctx.params) > 0 {
+		ctx.indexNames[ctx.params[0]] = schemas.IndexType
+	} else {
+		ctx.isIndex = true
+	}
+	return nil
+}
+
+// UniqueTagHandler describes the unique tag handler
+func UniqueTagHandler(ctx *Context) error {
+	if len(ctx.params) > 0 {
+		ctx.indexNames[ctx.params[0]] = schemas.UniqueType
+	} else {
+		ctx.isUnique = true
+	}
+	return nil
+}
+
+// CommentTagHandler adds a comment to the column
+func CommentTagHandler(ctx *Context) error {
+	if len(ctx.params) > 0 {
+		ctx.col.Comment = strings.Trim(ctx.params[0], "' ")
+	}
+	return nil
+}
+
+// SQLTypeTagHandler describes the SQL type tag handler
+func SQLTypeTagHandler(ctx *Context) error {
+	ctx.col.SQLType = schemas.SQLType{Name: ctx.tagName}
+	if len(ctx.params) > 0 {
+		if ctx.tagName == schemas.Enum {
+			ctx.col.EnumOptions = make(map[string]int)
+			for k, v := range ctx.params {
+				v = strings.TrimSpace(v)
+				v = strings.Trim(v, "'")
+				ctx.col.EnumOptions[v] = k
+			}
+		} else if ctx.tagName == schemas.Set {
+			ctx.col.SetOptions = make(map[string]int)
+			for k, v := range ctx.params {
+				v = strings.TrimSpace(v)
+				v = strings.Trim(v, "'")
+				ctx.col.SetOptions[v] = k
+			}
+		} else {
+			var err error
+			if len(ctx.params) == 2 {
+				ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+				if err != nil {
+					return err
+				}
+				ctx.col.Length2, err = strconv.Atoi(ctx.params[1])
+				if err != nil {
+					return err
+				}
+			} else if len(ctx.params) == 1 {
+				ctx.col.Length, err = strconv.Atoi(ctx.params[0])
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// ExtendsTagHandler describes the extends tag handler
+func ExtendsTagHandler(ctx *Context) error {
+	var fieldValue = ctx.fieldValue
+	var isPtr = false
+	switch fieldValue.Kind() {
+	case reflect.Ptr:
+		f := fieldValue.Type().Elem()
+		if f.Kind() == reflect.Struct {
+			fieldPtr := fieldValue
+			fieldValue = fieldValue.Elem()
+			if !fieldValue.IsValid() || fieldPtr.IsNil() {
+				fieldValue = reflect.New(f).Elem()
+			}
+		}
+		isPtr = true
+		fallthrough
+	case reflect.Struct:
+		parentTable, err := ctx.parser.Parse(fieldValue)
+		if err != nil {
+			return err
+		}
+		for _, col := range parentTable.Columns() {
+			col.FieldName = fmt.Sprintf("%v.%v", ctx.col.FieldName, col.FieldName)
+
+			var tagPrefix = ctx.col.FieldName
+			if len(ctx.params) > 0 {
+				col.Nullable = isPtr
+				tagPrefix = ctx.params[0]
+				if col.IsPrimaryKey {
+					col.Name = ctx.col.FieldName
+					col.IsPrimaryKey = false
+				} else {
+					col.Name = fmt.Sprintf("%v%v", tagPrefix, col.Name)
+				}
+			}
+
+			if col.Nullable {
+				col.IsAutoIncrement = false
+				col.IsPrimaryKey = false
+			}
+
+			ctx.table.AddColumn(col)
+			for indexName, indexType := range col.Indexes {
+				addIndex(indexName, ctx.table, col, indexType)
+			}
+		}
+	default:
+		// TODO: warn about unsupported field kinds
+	}
+	return nil
+}
+
+// CacheTagHandler describes the cache tag handler
+func CacheTagHandler(ctx *Context) error {
+	if !ctx.hasCacheTag {
+		ctx.hasCacheTag = true
+	}
+	return nil
+}
+
+// NoCacheTagHandler describes the nocache tag handler
+func NoCacheTagHandler(ctx *Context) error {
+	if !ctx.hasNoCacheTag {
+		ctx.hasNoCacheTag = true
+	}
+	return nil
+}
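ExtendsTagHandler above is what flattens an embedded struct into the outer table. An illustrative sketch (both types hypothetical):

	type Base struct {
		Id      int64
		Created time.Time `xorm:"created"`
	}

	type Article struct {
		Base  `xorm:"extends"` // Base's columns are inlined into Article's table
		Title string
	}

With a prefix parameter such as `xorm:"extends('base_')"`, non-primary-key parent columns would additionally be prefixed.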
diff --git a/wire_gen.go b/wire_gen.go
index 2d0e72a2b2..7710e02bd3 100644
--- a/wire_gen.go
+++ b/wire_gen.go
@@ -307,7 +307,11 @@ func InitializeApp() (*App, error) {
 	if err != nil {
 		return nil, err
 	}
-	enforcerImpl, err := casbin.NewEnforcerImpl(syncedEnforcer, sessionManager, sugaredLogger)
+	casbinSyncedEnforcer, err := casbin.CreateV2()
+	if err != nil {
+		return nil, err
+	}
+	enforcerImpl, err := casbin.NewEnforcerImpl(syncedEnforcer, casbinSyncedEnforcer, sessionManager, sugaredLogger)
 	if err != nil {
 		return nil, err
 	}
@@ -944,6 +948,6 @@ func InitializeApp() (*App, error) {
 	if err != nil {
 		return nil, err
 	}
-	mainApp := NewApp(muxRouter, sugaredLogger, sseSSE, syncedEnforcer, db, sessionManager, posthogClient, loggingMiddlewareImpl, centralEventProcessor, pubSubClientServiceImpl, workflowEventProcessorImpl)
+	mainApp := NewApp(muxRouter, sugaredLogger, sseSSE, syncedEnforcer, db, sessionManager, posthogClient, loggingMiddlewareImpl, centralEventProcessor, pubSubClientServiceImpl, workflowEventProcessorImpl, casbinSyncedEnforcer)
 	return mainApp, nil
 }
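Taken together with the earlier changes, both a casbin v1 and a casbin v2 SyncedEnforcer are now constructed and injected side by side, with USE_CASBIN_V2 defaulting to false. The practical gap that flag has to bridge is the Enforce signature: v1 returns only a bool, while v2 returns (bool, error). A minimal sketch of such gating, with hypothetical wrapper and field names (devtron's actual selection logic lives in its casbin package and is not shown in this diff):

	package main

	import (
		casbinv1 "github.com/casbin/casbin"
		casbinv2 "github.com/casbin/casbin/v2"
	)

	// exampleEnforcer is an illustrative wrapper, not devtron's EnforcerImpl;
	// it only demonstrates the v1/v2 Enforce signature difference.
	type exampleEnforcer struct {
		useCasbinV2 bool
		enforcerV1  *casbinv1.SyncedEnforcer
		enforcerV2  *casbinv2.SyncedEnforcer
	}

	func (e *exampleEnforcer) enforce(sub, obj, act string) bool {
		if e.useCasbinV2 {
			ok, err := e.enforcerV2.Enforce(sub, obj, act) // v2 returns (bool, error)
			return err == nil && ok
		}
		return e.enforcerV1.Enforce(sub, obj, act) // v1 returns bool only
	}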