diff --git a/cmd/sippy/load.go b/cmd/sippy/load.go
index 49e8346f69..9204c87889 100644
--- a/cmd/sippy/load.go
+++ b/cmd/sippy/load.go
@@ -40,6 +40,7 @@ import (
"github.com/openshift/sippy/pkg/dataloader/releaseloader"
"github.com/openshift/sippy/pkg/dataloader/testownershiploader"
"github.com/openshift/sippy/pkg/db"
+ "github.com/openshift/sippy/pkg/db/partitionmanager"
"github.com/openshift/sippy/pkg/flags"
"github.com/openshift/sippy/pkg/github/commenter"
)
@@ -455,6 +456,11 @@ func (f *LoadFlags) prowLoader(ctx context.Context, dbc *db.DB, sippyConfig *v1.
loadSince = &t
}
+ partMgr, err := partitionmanager.NewWithDefaults(dbc.DB)
+ if err != nil {
+ log.WithError(err).Warn("failed to create partition manager, partitions will be managed on demand")
+ }
+
return prowloader.New(
ctx,
dbc,
@@ -468,7 +474,8 @@ func (f *LoadFlags) prowLoader(ctx context.Context, dbc *db.DB, sippyConfig *v1.
ghCommenter,
promPusher,
loadSince,
- syntheticReleaseJobOverrides), nil
+ syntheticReleaseJobOverrides,
+ partMgr), nil
}
// parseProwLoadSince parses a time value that is either an absolute RFC3339 timestamp
diff --git a/cmd/sippy/serve.go b/cmd/sippy/serve.go
index 631a85d5ea..ceb11455b7 100644
--- a/cmd/sippy/serve.go
+++ b/cmd/sippy/serve.go
@@ -25,6 +25,7 @@ import (
"github.com/openshift/sippy/pkg/bigquery/bqlabel"
"github.com/openshift/sippy/pkg/dataloader/prowloader/gcs"
"github.com/openshift/sippy/pkg/db/models"
+ "github.com/openshift/sippy/pkg/db/partitionmanager"
"github.com/openshift/sippy/pkg/flags"
"github.com/openshift/sippy/pkg/flags/configflags"
"github.com/openshift/sippy/pkg/sippyserver"
@@ -253,6 +254,20 @@ func NewServeCommand() *cobra.Command {
}()
}
+ partMgr, pmErr := partitionmanager.NewWithDefaults(dbc.DB)
+ if pmErr != nil {
+ log.WithError(pmErr).Warn("failed to create partition manager, partition maintenance disabled")
+ } else {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ if startErr := partMgr.Start(ctx); startErr != nil {
+ log.WithError(startErr).Warn("failed to start partition manager")
+ } else {
+ defer partMgr.Stop()
+ log.Info("partition manager started for background maintenance")
+ }
+ }
+
server.Serve()
return nil
},
diff --git a/go.mod b/go.mod
index e7044b4613..13179ab308 100644
--- a/go.mod
+++ b/go.mod
@@ -21,6 +21,8 @@ require (
github.com/hashicorp/go-version v1.7.0
github.com/jackc/pgtype v1.14.0
github.com/jferrl/go-githubauth v1.1.0
+ github.com/jirevwe/go_partman v0.4.1
+ github.com/jmoiron/sqlx v1.4.0
github.com/lib/pq v1.10.9
github.com/mark3labs/mcp-go v0.39.1
github.com/montanaflynn/stats v0.6.6
@@ -100,7 +102,7 @@ require (
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
- github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v4 v4.18.2 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
@@ -111,6 +113,7 @@ require (
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/oklog/ulid/v2 v2.1.0 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pjbgf/sha1cd v0.6.0 // indirect
@@ -154,5 +157,6 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/grpc v1.79.3 // indirect
google.golang.org/protobuf v1.36.10 // indirect
+ gopkg.in/guregu/null.v4 v4.0.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
)
diff --git a/go.sum b/go.sum
index 64642dc446..1672063709 100644
--- a/go.sum
+++ b/go.sum
@@ -26,6 +26,8 @@ cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -141,6 +143,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
@@ -241,8 +245,8 @@ github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX
github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
@@ -257,9 +261,14 @@ github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgS
github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU=
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
+github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
+github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jferrl/go-githubauth v1.1.0 h1:jlpSklfmiRHfz5J2rU2wNkzfGSSPiVe2jPubEmDS+xc=
@@ -268,6 +277,10 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI=
github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jirevwe/go_partman v0.4.1 h1:2tnXLd8RxuPWTH/qH2bXUhz6h38fAWeYPhX4dkYw/48=
+github.com/jirevwe/go_partman v0.4.1/go.mod h1:WjvQDrjWQriugM832CWjRl+s6v7mBezSFG4SCti8QfA=
+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -305,6 +318,8 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/migueleliasweb/go-github-mock v0.0.23 h1:GOi9oX/+Seu9JQ19V8bPDLqDI7M9iEOjo3g8v1k6L2c=
github.com/migueleliasweb/go-github-mock v0.0.23/go.mod h1:NsT8FGbkvIZQtDu38+295sZEX8snaUiiQgsGxi6GUxk=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -320,6 +335,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
+github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
@@ -334,6 +351,7 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/openshift-eng/ci-test-mapping v0.0.0-20231030141615-24a18ed8fe3a h1:bH+5JOkdlBENYZo6OaTA3ra2RjJsFFK+upv5CUAL6mM=
github.com/openshift-eng/ci-test-mapping v0.0.0-20231030141615-24a18ed8fe3a/go.mod h1:HtbWQQG60/CJDMXoRkRvcdR2WJniLk4osp2kUCW4Q3E=
+github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pjbgf/sha1cd v0.6.0 h1:3WJ8Wz8gvDz29quX1OcEmkAlUg9diU4GxJHqs0/XiwU=
@@ -583,6 +601,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg=
+gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/redis.v5 v5.2.9 h1:MNZYOLPomQzZMfpN3ZtD1uyJ2IDonTTlxYiV/pEApiw=
gopkg.in/redis.v5 v5.2.9/go.mod h1:6gtv0/+A4iM08kdRfocWYB3bLX2tebpNtfKlFT6H4mY=
diff --git a/pkg/dataloader/prowloader/prow.go b/pkg/dataloader/prowloader/prow.go
index f01cbea16a..1a0012dbb3 100644
--- a/pkg/dataloader/prowloader/prow.go
+++ b/pkg/dataloader/prowloader/prow.go
@@ -11,7 +11,6 @@ import (
"reflect"
"regexp"
"strconv"
- "strings"
"sync"
"sync/atomic"
"time"
@@ -43,6 +42,7 @@ import (
"github.com/openshift/sippy/pkg/dataloader/prowloader/testconversion"
"github.com/openshift/sippy/pkg/db"
"github.com/openshift/sippy/pkg/db/models"
+ "github.com/openshift/sippy/pkg/db/partitionmanager"
"github.com/openshift/sippy/pkg/github/commenter"
"github.com/openshift/sippy/pkg/releaseoverride"
"github.com/openshift/sippy/pkg/synthetictests"
@@ -58,6 +58,7 @@ var gcsPathStrip = regexp.MustCompile(`.*/gs/[^/]+/`)
type ProwLoader struct {
ctx context.Context
dbc *db.DB
+ partMgr *partitionmanager.PartitionManager
errors []error
githubClient *github.Client
bigQueryClient *bqcachedclient.Client
@@ -96,11 +97,13 @@ func New(
ghCommenter *commenter.GitHubCommenter,
promPusher *push.Pusher,
loadSince *time.Time,
- syntheticReleaseJobOverrides *releaseoverride.SyntheticReleaseOverrides) *ProwLoader {
+ syntheticReleaseJobOverrides *releaseoverride.SyntheticReleaseOverrides,
+ partMgr *partitionmanager.PartitionManager) *ProwLoader {
return &ProwLoader{
ctx: ctx,
dbc: dbc,
+ partMgr: partMgr,
gcsClient: gcsClient,
githubClient: githubClient,
bigQueryClient: bigQueryClient,
@@ -373,6 +376,12 @@ func (pl *ProwLoader) loadDailyTestAnalysisByJob(ctx context.Context) error {
}
log.Infof("importing test analysis by job for dates: %v", importDates)
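+ // Run one maintenance pass up front so go_partman can pre-create future
+ // partitions and drop any past retention; the per-date loop below still
+ // ensures historical partitions individually via EnsurePartition.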
+ if pl.partMgr != nil {
+ if err := pl.partMgr.Maintain(ctx); err != nil {
+ log.WithError(err).Warn("partition manager maintenance failed, will create partitions on demand")
+ }
+ }
+
jobCache, err := query.LoadProwJobCache(pl.dbc)
if err != nil {
log.WithError(err).Error("error loading job cache")
@@ -394,16 +403,11 @@ func (pl *ProwLoader) loadDailyTestAnalysisByJob(ctx context.Context) error {
return errors.Wrapf(err, "error parsing next day from %s", dateToImport)
}
- // create a partition for this date
- partitionSQL := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS test_analysis_by_job_by_dates_%s PARTITION OF test_analysis_by_job_by_dates
- FOR VALUES FROM ('%s') TO ('%s');`, strings.ReplaceAll(dateToImport, "-", "_"), dateToImport, nextDay)
- dLog.Info(partitionSQL)
-
- if res := pl.dbc.DB.Exec(partitionSQL); res.Error != nil {
- log.WithError(res.Error).Error("error creating partition")
- return res.Error
+ if err := partitionmanager.EnsurePartition(pl.dbc.DB, "test_analysis_by_job_by_dates", dateToImport, nextDay); err != nil {
+ log.WithError(err).Error("error creating partition")
+ return err
}
- dLog.Warnf("partition created for releases %v", pl.releases)
+ dLog.Infof("partition ensured for releases %v", pl.releases)
q := pl.bigQueryClient.Query(ctx, bqlabel.ProwLoaderTestAnalysis, fmt.Sprintf(`WITH
deduped_testcases AS (
diff --git a/pkg/db/partitionmanager/manager.go b/pkg/db/partitionmanager/manager.go
new file mode 100644
index 0000000000..e91be662f7
--- /dev/null
+++ b/pkg/db/partitionmanager/manager.go
@@ -0,0 +1,146 @@
+package partitionmanager
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "time"
+
+ partman "github.com/jirevwe/go_partman"
+ "github.com/jmoiron/sqlx"
+ log "github.com/sirupsen/logrus"
+ "gorm.io/gorm"
+)
+
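+// validIdentifier matches simple SQL identifiers; EnsurePartition uses it to
+// reject table names that cannot be safely interpolated into DDL.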
+var validIdentifier = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)
+
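+// defaultSampleRate controls how often the background maintenance loop runs
+// when the manager is started via Start.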
+const defaultSampleRate = 1 * time.Hour
+
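+// TableConfig describes one partitioned table to manage: the column it is
+// range-partitioned on, the width of each partition, how many future
+// partitions to pre-create, and how long old partitions are retained before
+// being dropped.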
+type TableConfig struct {
+ Name string
+ Schema string
+ PartitionColumn string
+ PartitionInterval time.Duration
+ PartitionCount uint
+ RetentionPeriod time.Duration
+}
+
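+// DefaultTables lists the partitioned tables Sippy manages by default:
+// daily partitions of test_analysis_by_job_by_dates, with 14 days
+// pre-created and one year of retention.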
+var DefaultTables = []TableConfig{
+ {
+ Name: "test_analysis_by_job_by_dates",
+ Schema: "public",
+ PartitionColumn: "date",
+ PartitionInterval: 24 * time.Hour,
+ PartitionCount: 14,
+ RetentionPeriod: 365 * 24 * time.Hour,
+ },
+}
+
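+// PartitionManager wraps a go_partman Manager configured for Sippy's
+// partitioned tables.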
+type PartitionManager struct {
+ manager *partman.Manager
+}
+
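+// NewWithDefaults builds a PartitionManager for DefaultTables using the
+// default maintenance sample rate.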
+func NewWithDefaults(gormDB *gorm.DB) (*PartitionManager, error) {
+ return New(gormDB, defaultSampleRate, DefaultTables)
+}
+
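+// New builds a PartitionManager on top of the given GORM connection,
+// converting each TableConfig into a go_partman range-partitioned table
+// definition.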
+func New(gormDB *gorm.DB, sampleRate time.Duration, tables []TableConfig) (*PartitionManager, error) {
+ if gormDB == nil {
+ return nil, fmt.Errorf("gorm DB cannot be nil")
+ }
+ sqlDB, err := gormDB.DB()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get *sql.DB from GORM: %w", err)
+ }
+
+ sqlxDB := sqlx.NewDb(sqlDB, "postgres")
+
+ partmanTables := make([]partman.Table, len(tables))
+ for i, t := range tables {
+ partmanTables[i] = partman.Table{
+ Name: t.Name,
+ Schema: t.Schema,
+ PartitionBy: t.PartitionColumn,
+ PartitionType: partman.TypeRange,
+ PartitionInterval: t.PartitionInterval,
+ PartitionCount: t.PartitionCount,
+ RetentionPeriod: t.RetentionPeriod,
+ }
+ }
+
+ cfg := &partman.Config{
+ SampleRate: sampleRate,
+ Tables: partmanTables,
+ }
+
+ manager, err := partman.NewManager(
+ partman.WithDB(sqlxDB),
+ partman.WithConfig(cfg),
+ partman.WithLogger(&logrusLogger{entry: log.WithField("source", "partman")}),
+ partman.WithClock(partman.NewRealClock()),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create partition manager: %w", err)
+ }
+
+ return &PartitionManager{manager: manager}, nil
+}
+
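+// Maintain runs a single maintenance pass: go_partman pre-creates future
+// partitions and drops partitions older than the retention period.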
+func (pm *PartitionManager) Maintain(ctx context.Context) error {
+ return pm.manager.Maintain(ctx)
+}
+
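+// Start launches go_partman's background maintenance loop, which runs
+// Maintain on the configured sample rate until the context is cancelled or
+// Stop is called.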
+func (pm *PartitionManager) Start(ctx context.Context) error {
+ return pm.manager.Start(ctx)
+}
+
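+// Stop shuts down the background maintenance loop started by Start.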
+func (pm *PartitionManager) Stop() {
+ pm.manager.Stop()
+}
+
+// EnsurePartition creates a partition for a specific date on the given table
+// if it doesn't already exist. go_partman only creates future partitions, so
+// this covers historical dates needed during initial bulk loads.
+// Uses YYYYMMDD naming to match go_partman's format for retention compatibility.
+func EnsurePartition(db *gorm.DB, table, date, nextDay string) error {
+ if !validIdentifier.MatchString(table) {
+ return fmt.Errorf("invalid table name %q", table)
+ }
+ dateParsed, err := time.Parse("2006-01-02", date)
+ if err != nil {
+ return fmt.Errorf("invalid date %q: %w", date, err)
+ }
+ nextDayParsed, err := time.Parse("2006-01-02", nextDay)
+ if err != nil {
+ return fmt.Errorf("invalid nextDay %q: %w", nextDay, err)
+ }
+ partitionName := fmt.Sprintf("%s_%s", table, dateParsed.Format("20060102"))
+ // DDL statements don't support parameterized placeholders; dates are
+ // validated above via time.Parse so interpolation is safe.
+ sql := fmt.Sprintf(
+ `CREATE TABLE IF NOT EXISTS "%s" PARTITION OF "%s" FOR VALUES FROM ('%s') TO ('%s')`,
+ partitionName, table,
+ dateParsed.Format("2006-01-02"), nextDayParsed.Format("2006-01-02"),
+ )
+
+ if res := db.Exec(sql); res.Error != nil {
+ return fmt.Errorf("error creating partition %s: %w", partitionName, res.Error)
+ }
+ return nil
+}
+
+// logrusLogger bridges logrus to go_partman's Logger interface.
+type logrusLogger struct {
+ entry *log.Entry
+}
+
+func (l *logrusLogger) Info(args ...interface{}) { l.entry.Info(args...) }
+func (l *logrusLogger) Debug(args ...interface{}) { l.entry.Debug(args...) }
+func (l *logrusLogger) Warn(args ...interface{}) { l.entry.Warn(args...) }
+func (l *logrusLogger) Error(args ...interface{}) { l.entry.Error(args...) }
+func (l *logrusLogger) Fatal(args ...interface{}) { l.entry.Fatal(args...) }
+func (l *logrusLogger) Infof(format string, args ...interface{}) { l.entry.Infof(format, args...) }
+func (l *logrusLogger) Debugf(format string, args ...interface{}) { l.entry.Debugf(format, args...) }
+func (l *logrusLogger) Warnf(format string, args ...interface{}) { l.entry.Warnf(format, args...) }
+func (l *logrusLogger) Errorf(format string, args ...interface{}) { l.entry.Errorf(format, args...) }
+func (l *logrusLogger) Fatalf(format string, args ...interface{}) { l.entry.Fatalf(format, args...) }
diff --git a/test/e2e/db/migrate/migrate_test.go b/test/e2e/db/migrate/migrate_test.go
index 129b62c066..ede8162d73 100644
--- a/test/e2e/db/migrate/migrate_test.go
+++ b/test/e2e/db/migrate/migrate_test.go
@@ -1,13 +1,17 @@
package migrate_test
import (
+ "context"
+ "fmt"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/openshift/sippy/pkg/db/migrate"
"github.com/openshift/sippy/pkg/db/models"
+ "github.com/openshift/sippy/pkg/db/partitionmanager"
"github.com/openshift/sippy/test/e2e/db/migrate/testmigrations"
"github.com/openshift/sippy/test/e2e/util"
)
@@ -177,3 +181,208 @@ func TestMigrations(t *testing.T) {
require.NoError(t, err, "database connection should still work after multiple migration operations")
})
}
+
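+// TestPartitionManager exercises the partitionmanager package against a real
+// Postgres instance: explicit partition creation via EnsurePartition, routing
+// of rows into partitions, input validation, and go_partman-driven creation
+// and retention of partitions via Maintain.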
+func TestPartitionManager(t *testing.T) {
+ const testTable = "e2e_partitioned_test"
+
+ dbc := util.CreateE2EPostgresConnection(t)
+
+ dropPartmanState := func(t *testing.T) {
+ t.Helper()
+ if err := dbc.DB.Exec("DROP SCHEMA IF EXISTS partman CASCADE").Error; err != nil {
+ t.Logf("cleanup: drop partman schema: %v", err)
+ }
+ var childTables []string
+ if err := dbc.DB.Raw(
+ "SELECT c.relname FROM pg_inherits JOIN pg_class c ON c.oid = pg_inherits.inhrelid WHERE inhparent = ?::regclass",
+ testTable,
+ ).Scan(&childTables).Error; err != nil {
+ t.Logf("cleanup: query child tables: %v", err)
+ return
+ }
+ for _, child := range childTables {
+ if err := dbc.DB.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, child)).Error; err != nil {
+ t.Logf("cleanup: drop child table %s: %v", child, err)
+ }
+ }
+ }
+
+ t.Cleanup(func() {
+ dropPartmanState(t)
+ if err := dbc.DB.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s" CASCADE`, testTable)).Error; err != nil {
+ t.Logf("cleanup: drop test table: %v", err)
+ }
+ })
+
+ // Create a test partitioned table
+ err := dbc.DB.Exec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS "%s" (
+ date timestamp with time zone,
+ value text
+ ) PARTITION BY RANGE (date)`, testTable)).Error
+ require.NoError(t, err, "should create test partitioned table")
+
+ t.Run("EnsurePartition creates and is idempotent", func(t *testing.T) {
+ date := "2025-06-01"
+ nextDay := "2025-06-02"
+
+ err := partitionmanager.EnsurePartition(dbc.DB, testTable, date, nextDay)
+ require.NoError(t, err, "should create partition")
+
+ // Verify partition exists
+ var exists bool
+ err = dbc.DB.Raw(
+ "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename = ?)",
+ fmt.Sprintf("%s_20250601", testTable),
+ ).Scan(&exists).Error
+ require.NoError(t, err)
+ assert.True(t, exists, "partition table should exist")
+
+ // Calling again should be idempotent
+ err = partitionmanager.EnsurePartition(dbc.DB, testTable, date, nextDay)
+ require.NoError(t, err, "second EnsurePartition call should succeed (idempotent)")
+ })
+
+ t.Run("write and read rows through partitioned table", func(t *testing.T) {
+ date := "2025-06-01"
+ nextDay := "2025-06-02"
+
+ err := partitionmanager.EnsurePartition(dbc.DB, testTable, date, nextDay)
+ require.NoError(t, err)
+
+ // Insert a row — it should be routed to the partition automatically
+ ts := time.Date(2025, 6, 1, 12, 0, 0, 0, time.UTC)
+ err = dbc.DB.Exec(
+ fmt.Sprintf(`INSERT INTO "%s" (date, value) VALUES (?, ?)`, testTable),
+ ts, "hello from partition",
+ ).Error
+ require.NoError(t, err, "should insert into partitioned table")
+
+ // Read it back via the parent table
+ var value string
+ err = dbc.DB.Raw(
+ fmt.Sprintf(`SELECT value FROM "%s" WHERE date = ?`, testTable), ts,
+ ).Scan(&value).Error
+ require.NoError(t, err, "should read row from partitioned table")
+ assert.Equal(t, "hello from partition", value)
+
+ // Verify the row physically lives in the partition, not just the parent
+ partName := fmt.Sprintf("%s_20250601", testTable)
+ var count int64
+ err = dbc.DB.Raw(
+ fmt.Sprintf(`SELECT count(*) FROM "%s"`, partName),
+ ).Scan(&count).Error
+ require.NoError(t, err)
+ assert.Equal(t, int64(1), count, "row should be in the partition table")
+
+ // Insert a second row on a different date — requires its own partition
+ err = partitionmanager.EnsurePartition(dbc.DB, testTable, nextDay, "2025-06-03")
+ require.NoError(t, err)
+
+ ts2 := time.Date(2025, 6, 2, 8, 30, 0, 0, time.UTC)
+ err = dbc.DB.Exec(
+ fmt.Sprintf(`INSERT INTO "%s" (date, value) VALUES (?, ?)`, testTable),
+ ts2, "second day",
+ ).Error
+ require.NoError(t, err, "should insert into second partition")
+
+ // Both rows visible from the parent table
+ var total int64
+ err = dbc.DB.Raw(
+ fmt.Sprintf(`SELECT count(*) FROM "%s"`, testTable),
+ ).Scan(&total).Error
+ require.NoError(t, err)
+ assert.Equal(t, int64(2), total, "parent table should see rows across partitions")
+ })
+
+ t.Run("EnsurePartition rejects invalid table names", func(t *testing.T) {
+ err := partitionmanager.EnsurePartition(dbc.DB, "robert'; DROP TABLE students;--", "2025-06-01", "2025-06-02")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid table name")
+ })
+
+ t.Run("EnsurePartition rejects invalid dates", func(t *testing.T) {
+ err := partitionmanager.EnsurePartition(dbc.DB, testTable, "not-a-date", "2025-06-02")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid date")
+ })
+
+ t.Run("Maintain creates future partitions", func(t *testing.T) {
+ ctx := context.Background()
+ partitionCount := uint(3)
+
+ dropPartmanState(t)
+
+ pm, err := partitionmanager.New(dbc.DB, time.Hour, []partitionmanager.TableConfig{
+ {
+ Name: testTable,
+ Schema: "public",
+ PartitionColumn: "date",
+ PartitionInterval: 24 * time.Hour,
+ PartitionCount: partitionCount,
+ RetentionPeriod: 365 * 24 * time.Hour,
+ },
+ })
+ require.NoError(t, err, "should create partition manager")
+
+ err = pm.Maintain(ctx)
+ require.NoError(t, err, "Maintain should succeed")
+
+ // Verify future partitions were created
+ today := time.Now().UTC().Truncate(24 * time.Hour)
+ for i := uint(0); i < partitionCount; i++ {
+ d := today.Add(time.Duration(i) * 24 * time.Hour)
+ partName := fmt.Sprintf("%s_%s", testTable, d.Format("20060102"))
+ var exists bool
+ err = dbc.DB.Raw(
+ "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename = ?)", partName,
+ ).Scan(&exists).Error
+ require.NoError(t, err)
+ assert.True(t, exists, "future partition %s should exist", partName)
+ }
+ })
+
+ t.Run("Maintain drops partitions beyond retention", func(t *testing.T) {
+ ctx := context.Background()
+
+ dropPartmanState(t)
+
+ // Create a partition well in the past (3 days ago)
+ oldDate := time.Now().UTC().Add(-3 * 24 * time.Hour).Truncate(24 * time.Hour)
+ oldDateStr := oldDate.Format("2006-01-02")
+ oldNextDay := oldDate.Add(24 * time.Hour).Format("2006-01-02")
+
+ err := partitionmanager.EnsurePartition(dbc.DB, testTable, oldDateStr, oldNextDay)
+ require.NoError(t, err, "should create old partition")
+
+ oldPartName := fmt.Sprintf("%s_%s", testTable, oldDate.Format("20060102"))
+ var exists bool
+ err = dbc.DB.Raw(
+ "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename = ?)", oldPartName,
+ ).Scan(&exists).Error
+ require.NoError(t, err)
+ require.True(t, exists, "old partition should exist before Maintain")
+
+ // Create manager with 1-hour retention so the old partition is beyond cutoff
+ pm, err := partitionmanager.New(dbc.DB, time.Hour, []partitionmanager.TableConfig{
+ {
+ Name: testTable,
+ Schema: "public",
+ PartitionColumn: "date",
+ PartitionInterval: 24 * time.Hour,
+ PartitionCount: 3,
+ RetentionPeriod: 1 * time.Hour,
+ },
+ })
+ require.NoError(t, err, "should create partition manager with short retention")
+
+ err = pm.Maintain(ctx)
+ require.NoError(t, err, "Maintain should succeed")
+
+ // The old partition (3 days ago) should have been dropped
+ err = dbc.DB.Raw(
+ "SELECT EXISTS(SELECT 1 FROM pg_tables WHERE tablename = ?)", oldPartName,
+ ).Scan(&exists).Error
+ require.NoError(t, err)
+ assert.False(t, exists, "old partition %s should be dropped by retention policy", oldPartName)
+ })
+}
diff --git a/vendor/github.com/jackc/pgservicefile/.travis.yml b/vendor/github.com/jackc/pgservicefile/.travis.yml
deleted file mode 100644
index e176228e8e..0000000000
--- a/vendor/github.com/jackc/pgservicefile/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/jackc/pgservicefile/README.md b/vendor/github.com/jackc/pgservicefile/README.md
index e50ca12627..2fc7e012cb 100644
--- a/vendor/github.com/jackc/pgservicefile/README.md
+++ b/vendor/github.com/jackc/pgservicefile/README.md
@@ -1,5 +1,6 @@
-[](https://godoc.org/github.com/jackc/pgservicefile)
-[](https://travis-ci.org/jackc/pgservicefile)
+[](https://pkg.go.dev/github.com/jackc/pgservicefile)
+[](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml)
+
# pgservicefile
diff --git a/vendor/github.com/jackc/pgservicefile/pgservicefile.go b/vendor/github.com/jackc/pgservicefile/pgservicefile.go
index 797bbab9e7..c62caa7fef 100644
--- a/vendor/github.com/jackc/pgservicefile/pgservicefile.go
+++ b/vendor/github.com/jackc/pgservicefile/pgservicefile.go
@@ -57,7 +57,7 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
} else if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
service = &Service{Name: line[1 : len(line)-1], Settings: make(map[string]string)}
servicefile.Services = append(servicefile.Services, service)
- } else {
+ } else if service != nil {
parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("unable to parse line %d", lineNum)
@@ -67,6 +67,8 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
value := strings.TrimSpace(parts[1])
service.Settings[key] = value
+ } else {
+ return nil, fmt.Errorf("line %d is not in a section", lineNum)
}
}
diff --git a/vendor/github.com/jirevwe/go_partman/.gitignore b/vendor/github.com/jirevwe/go_partman/.gitignore
new file mode 100644
index 0000000000..42e4918e9d
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/.gitignore
@@ -0,0 +1,26 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/jirevwe/go_partman/LICENSE b/vendor/github.com/jirevwe/go_partman/LICENSE
new file mode 100644
index 0000000000..215dac47a7
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Raymond Tukpe
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jirevwe/go_partman/README.md b/vendor/github.com/jirevwe/go_partman/README.md
new file mode 100644
index 0000000000..6b65887777
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/README.md
@@ -0,0 +1,197 @@
+# go_partman
+
+A Go native implementation of PostgreSQL table partitioning management, inspired by [pg_partman](https://github.com/pgpartman/pg_partman). This library helps you automatically manage and maintain partitioned tables in PostgreSQL databases.
+
+> Disclaimers:
+> 1. This library is currently in alpha, hence the public APIs might change.
+> 2. This library was built and is currently used to manage retention policies in [Convoy](https://github.com/frain-dev/convoy).
+> 3. It is currently behind a feature flag (I'll update this disclaimer once it's GA).
+> 4. This is the accompanying [pull request](https://github.com/frain-dev/convoy/pull/2194/files#diff-6c0399450dc8551e4cd42255ec24371c113d5b7771f6c6fdc0387cb0bc3df7f2) in Convoy if you want to see how it is integrated.
+
+#### Built By
+
+
+
+## Features
+
+- Automatic partition creation and management
+- Support for time-based range partitioning
+- Configurable retention policies
+- Automatic cleanup of old partitions
+- Pre-creation of future partitions
+- Multi-tenant support with tenant-specific partitioning
+- Extensible pre-drop hooks for custom cleanup logic
+
+## Installation
+
+```bash
+go get github.com/jirevwe/go_partman
+```
+
+## Usage
+
+### Basic Setup
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "github.com/jackc/pgx/v5/pgxpool"
+ "github.com/jackc/pgx/v5/stdlib"
+ "github.com/jirevwe/go_partman"
+ "github.com/jmoiron/sqlx"
+
+ "log"
+ "log/slog"
+ "os"
+
+ "time"
+)
+
+func main() {
+ pgxCfg, err := pgxpool.ParseConfig("postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ pool, err := pgxpool.NewWithConfig(context.Background(), pgxCfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ sqlDB := stdlib.OpenDBFromPool(pool)
+ db := sqlx.NewDb(sqlDB, "pgx")
+
+ config := &partman.Config{
+ SampleRate: 30 * time.Second,
+ SchemaName: "convoy",
+ }
+
+ clock := partman.NewRealClock()
+ manager, err := partman.NewAndStart(db, config, slog.New(slog.NewTextHandler(os.Stdout, nil)), clock)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if err = manager.Start(context.Background()); err != nil {
+ log.Fatal(err)
+ }
+
+ time.Sleep(30 * time.Second)
+}
+
+```
+
+### Multi-tenant Setup
+
+```go
+config := partman.Config{
+ SchemaName: "public",
+ Tables: []partman.TableConfig{
+ {
+ Name: "events",
+ TenantId: "tenant1", // Specify tenant ID
+ TenantIdColumn: "project_id", // Column name for tenant ID
+ PartitionType: partman.TypeRange,
+ PartitionBy: "created_at",
+ PartitionInterval: time.Hour * 24,
+ PartitionCount: 10,
+ RetentionPeriod: time.Hour * 24 * 7,
+ },
+ },
+}
+```
+
+### Adding a Managed Table
+
+You can add a new managed table to the partition manager using the `AddManagedTable` method:
+
+```go
+newTableConfig := partman.TableConfig{
+ Name: "new_events",
+ TenantId: "tenant1", // Specify tenant ID
+ TenantIdColumn: "project_id", // Column name for tenant ID
+ PartitionType: partman.TypeRange,
+ PartitionBy: "created_at",
+ PartitionInterval: time.Hour * 24,
+ PartitionCount: 10,
+ RetentionPeriod: time.Hour * 24 * 7,
+}
+
+// Add the new managed table
+if err := manager.AddManagedTable(newTableConfig); err != nil {
+ log.Fatal(err)
+}
+```
+
+### Import Existing Partitions
+
+You can import partitions that already exist in the database using the `ImportExistingPartitions` method:
+
+```go
+err = manager.ImportExistingPartitions(context.Background(), partman.Table{
+ TenantIdColumn: "project_id",
+ PartitionBy: "created_at",
+ PartitionType: partman.TypeRange,
+ PartitionInterval: time.Hour * 24,
+ PartitionCount: 10,
+ RetentionPeriod: time.Hour * 24 * 7,
+})
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Table Requirements
+
+Your table must be created as a partitioned table before using go_partman. Examples:
+
+```sql
+-- Single-tenant table
+CREATE TABLE events (
+ id VARCHAR NOT NULL,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ data JSONB,
+ PRIMARY KEY (id, created_at)
+) PARTITION BY RANGE (created_at);
+
+-- Multi-tenant table
+CREATE TABLE events (
+ id VARCHAR NOT NULL,
+ project_id VARCHAR NOT NULL,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ data JSONB,
+ PRIMARY KEY (id, created_at, project_id)
+) PARTITION BY RANGE (project_id, created_at);
+```
+
+## Features in Detail
+
+### Partition Types
+
+Currently supports:
+- **Range Partitioning**: Time-based range partitioning with optional tenant ID support
+- **List Partitioning**: Planned for future release
+- **Hash Partitioning**: Planned for future release
+
+### Maintenance Operations
+
+- Automatically creates new partitions ahead of time based on `PartitionCount`
+- Drops old partitions based on `RetentionPeriod`
+- Supports custom pre-drop hooks for data archival or backup operations
+
+### Multi-tenant Support
+
+- Optional tenant-based partitioning using `TenantId` and `TenantIdColumn`
+- Separate partition management per tenant
+- Automatic partition naming with tenant ID inclusion
+
+## Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request.
+
+## License
+
+MIT
diff --git a/vendor/github.com/jirevwe/go_partman/SIPPY_VENDOR_PATCHES.md b/vendor/github.com/jirevwe/go_partman/SIPPY_VENDOR_PATCHES.md
new file mode 100644
index 0000000000..8fffbabb63
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/SIPPY_VENDOR_PATCHES.md
@@ -0,0 +1,69 @@
+# Vendor Patches for go_partman
+
+This file documents patches applied to the vendored copy of
+`github.com/jirevwe/go_partman` (v0.4.1). These patches work around
+upstream issues in go_partman (non-multi-tenant table handling and
+missing embedded web assets) and should be removed once the upstream
+library addresses them.
+
+## 1. Default tenant for non-multi-tenant tables
+
+**File:** `manager.go`, function `initialize`
+
+**Problem:** go_partman was designed around multi-tenant workloads.
+Two internal functions assume at least one row exists in
+`partman.tenants` for each parent table:
+
+- `CreateFuturePartitions` queries `partman.tenants` and iterates over
+ the results to create partitions. For non-multi-tenant tables no
+ tenant rows exist, so the loop body never executes and no partitions
+ are created.
+
+- `importExistingPartitions` discovers existing child tables and
+ inserts management entries into `partman.partitions` with
+ `tenant_id = ''` (empty string, not SQL NULL). The
+ `validate_tenant_id` trigger on that table checks
+ `NEW.tenant_id IS NOT NULL` — empty string passes that check — then
+ verifies the tenant exists in `partman.tenants`. Because no tenant
+ row with `id = ''` exists, the trigger raises an exception and the
+ import fails.
+
+**Fix:** After `CreateParentTable` returns the parent table ID, insert
+a default tenant row with an empty-string ID when `TenantIdColumn` is
+empty (i.e. the table is not multi-tenant):
+
+```go
+if table.TenantIdColumn == "" {
+ if _, err = m.db.ExecContext(ctx, insertTenantSQL, "", id); err != nil {
+ return fmt.Errorf("failed to register default tenant for table %s: %w", table.Name, err)
+ }
+}
+```
+
+**Why the empty string is safe:** Every branch in go_partman that
+changes behavior based on tenant ID uses `len(x) > 0` guards:
+
+| Code path | Guard | Result with `""` |
+|---|---|---|
+| `generatePartitionName` | `len(tc.TenantId) > 0` | Uses `table_YYYYMMDD` (no tenant prefix) |
+| `generateRangePartitionSQL` | `len(tc.TenantId) > 0` | Uses non-tenant DDL template |
+| `DropOldPartitions` | `len(table.TenantId) > 0` | Pattern is `table_%` (no tenant infix) |
+| `checkTableColumnsExist` | `len(TenantIdColumn) > 0 && len(tenantId) > 0` | Skips tenant column check |
+| `buildTableName` | `tenantId != "" && len(tenantId) > 0` | Returns `schema.table` |
+| `importExistingPartitions` | `len(p.TenantFrom.String) > 0` | Skips duplicate tenant registration |
+
+The empty-string tenant satisfies the `partman.tenants` foreign key
+and `validate_tenant_id` trigger without affecting partition naming,
+DDL generation, or drop patterns. `insertTenantSQL` uses
+`ON CONFLICT DO NOTHING`, so repeated calls (e.g. server restarts)
+are idempotent. The tenant row cascades on parent table deletion.
+
+## 2. Embedded web assets placeholder
+
+**Directory:** `web/dist/index.html`
+
+go_partman's `ui.go` contains `//go:embed web/dist` but the published
+module does not include built web assets. Go's vendoring copies only
+`.go` files, so `go mod vendor` fails with
+`pattern web/dist: no matching files found`. A minimal placeholder
+`index.html` is provided so the embed directive resolves. The UI is
+not used by Sippy.
diff --git a/vendor/github.com/jirevwe/go_partman/clock.go b/vendor/github.com/jirevwe/go_partman/clock.go
new file mode 100644
index 0000000000..81ef08abb5
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/clock.go
@@ -0,0 +1,54 @@
+package partman
+
+import (
+ "sync"
+ "time"
+)
+
+// A Clock is an object that can tell you the current time.
+//
+// This interface allows decoupling code that uses time from the code that creates
+// a point in time. You can use this to your advantage by injecting Clocks into interfaces
+// rather than having implementations call time.Now() directly.
+//
+// Use RealClock() in production.
+// Use SimulatedClock() in test.
+type Clock interface {
+ Now() time.Time
+}
+
+func NewRealClock() Clock { return &realTimeClock{} }
+
+type realTimeClock struct{}
+
+func (_ *realTimeClock) Now() time.Time { return time.Now() }
+
+// A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own.
+// Time is advanced by explicit call to the AdvanceTime() or SetTime() functions.
+// This object is concurrency safe.
+type SimulatedClock struct {
+ mu *sync.Mutex
+ t time.Time
+}
+
+func NewSimulatedClock(t time.Time) *SimulatedClock {
+ return &SimulatedClock{mu: &sync.Mutex{}, t: t}
+}
+
+func (c *SimulatedClock) Now() time.Time {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.t
+}
+
+func (c *SimulatedClock) SetTime(t time.Time) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.t = t
+}
+
+func (c *SimulatedClock) AdvanceTime(d time.Duration) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.t = c.t.Add(d)
+}
diff --git a/vendor/github.com/jirevwe/go_partman/logger.go b/vendor/github.com/jirevwe/go_partman/logger.go
new file mode 100644
index 0000000000..ac2717fb9a
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/logger.go
@@ -0,0 +1,91 @@
+package partman
+
+import (
+ "fmt"
+ "log/slog"
+ "os"
+)
+
+type Logger interface {
+ Info(args ...interface{})
+ Debug(args ...interface{})
+ Warn(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+}
+
+// SlogLogger implements the Logger interface using the slog package
+type SlogLogger struct {
+ logger *slog.Logger
+}
+
+// NewSlogLogger creates a new SlogLogger instance
+func NewSlogLogger(opts ...slog.HandlerOptions) *SlogLogger {
+ var handler slog.Handler
+
+ if len(opts) > 0 {
+ handler = slog.NewJSONHandler(os.Stdout, &opts[0])
+ } else {
+ handler = slog.NewJSONHandler(os.Stdout, nil)
+ }
+
+ return &SlogLogger{logger: slog.New(handler)}
+}
+
+// Info logs an info message
+func (l *SlogLogger) Info(args ...interface{}) {
+ l.logger.Info(fmt.Sprint(args[0]), args[1:]...)
+}
+
+// Debug logs a debug message
+func (l *SlogLogger) Debug(args ...interface{}) {
+ l.logger.Debug(fmt.Sprint(args[0]), args[1:]...)
+}
+
+// Warn logs a warning message
+func (l *SlogLogger) Warn(args ...interface{}) {
+ l.logger.Warn(fmt.Sprint(args[0]), args[1:]...)
+}
+
+// Error logs an error message
+func (l *SlogLogger) Error(args ...interface{}) {
+ l.logger.Error(fmt.Sprint(args[0]), args[1:]...)
+}
+
+// Fatal logs a fatal message and exits
+func (l *SlogLogger) Fatal(args ...interface{}) {
+ l.logger.Error(fmt.Sprint(args[0]), args[1:]...)
+ os.Exit(1)
+}
+
+// Infof logs a formatted info message
+func (l *SlogLogger) Infof(format string, args ...interface{}) {
+ l.logger.Info(fmt.Sprintf(format, args...))
+}
+
+// Debugf logs a formatted debug message
+func (l *SlogLogger) Debugf(format string, args ...interface{}) {
+ l.logger.Debug(fmt.Sprintf(format, args...))
+}
+
+// Warnf logs a formatted warning message
+func (l *SlogLogger) Warnf(format string, args ...interface{}) {
+ l.logger.Warn(fmt.Sprintf(format, args...))
+}
+
+// Errorf logs a formatted error message
+func (l *SlogLogger) Errorf(format string, args ...interface{}) {
+ l.logger.Error(fmt.Sprintf(format, args...))
+}
+
+// Fatalf logs a formatted fatal message and exits
+func (l *SlogLogger) Fatalf(format string, args ...interface{}) {
+ l.logger.Error(fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
diff --git a/vendor/github.com/jirevwe/go_partman/manager.go b/vendor/github.com/jirevwe/go_partman/manager.go
new file mode 100644
index 0000000000..fd86b86879
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/manager.go
@@ -0,0 +1,880 @@
+package partman
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "gopkg.in/guregu/null.v4"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/oklog/ulid/v2"
+)
+
+// todo(raymond): add metrics
+
+// Manager Partition manager
+type Manager struct {
+ db *sqlx.DB
+ logger Logger
+ config *Config
+ clock Clock
+ hook Hook
+ partitions map[tableName]Partition
+ mu *sync.RWMutex
+ wg *sync.WaitGroup // For testing synchronization
+ stop chan struct{} // For graceful shutdown
+}
+
+func NewManager(options ...Option) (*Manager, error) {
+ m := &Manager{
+ mu: &sync.RWMutex{},
+ wg: &sync.WaitGroup{},
+ stop: make(chan struct{}),
+ partitions: make(map[tableName]Partition),
+ }
+
+ for _, opt := range options {
+ err := opt(m)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if m.db == nil {
+ return nil, ErrDbDriverMustNotBeNil
+ }
+
+ if m.logger == nil {
+ return nil, ErrLoggerMustNotBeNil
+ }
+
+ if m.config == nil {
+ return nil, ErrConfigMustNotBeNil
+ }
+
+ if m.clock == nil {
+ return nil, ErrClockMustNotBeNil
+ }
+
+ if err := m.runMigrations(context.Background()); err != nil {
+ return nil, err
+ }
+
+ if err := m.initialize(context.Background(), m.config); err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+func (m *Manager) GetConfig() Config {
+ return *m.config
+}
+
+// runMigrations runs all the migrations on the management partitions while keeping them backwards compatible
+func (m *Manager) runMigrations(ctx context.Context) error {
+ migrations := []string{
+ createPartmanSchema,
+ createParentsTable,
+ createTenantsTable,
+ createPartitionsTable,
+ createValidateTenantFunction,
+ createTriggerOnPartitionInsert,
+ }
+
+ tx, err := m.db.Begin()
+ if err != nil {
+ return fmt.Errorf("failed to begin transaction: %w", err)
+ }
+
+ for _, migration := range migrations {
+ if _, innerErr := tx.ExecContext(ctx, migration); innerErr != nil {
+ return fmt.Errorf("failed to run migration: %s, with error %w", migration, innerErr)
+ }
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ rollbackErr := tx.Rollback()
+ if rollbackErr != nil {
+ m.logger.Error("failed to rollback transaction", "error", rollbackErr)
+ }
+ return fmt.Errorf("failed to commit transaction: %w", err)
+ }
+
+ return nil
+}
+
+func (m *Manager) initialize(ctx context.Context, config *Config) error {
+ // Handle new ParentTables API
+ for _, table := range config.Tables {
+ m.logger.Info("creating parent table", "table", table.Name)
+ id, err := m.CreateParentTable(ctx, table)
+ if err != nil {
+ return fmt.Errorf("failed to create parent table %s: %w", table.Name, err)
+ }
+
+ table.Id = id
+
+ // For non-multi-tenant tables, register a default tenant with an
+ // empty-string ID so CreateFuturePartitions and
+ // importExistingPartitions work correctly.
+ if table.TenantIdColumn == "" {
+ if _, err = m.db.ExecContext(ctx, insertTenantSQL, "", id); err != nil {
+ return fmt.Errorf("failed to register default tenant for table %s: %w", table.Name, err)
+ }
+ }
+
+ // Import existing partitions for this table
+ err = m.importExistingPartitions(ctx, table)
+ if err != nil {
+ return fmt.Errorf("failed to import existing partitions for table %s: %w", table.Name, err)
+ }
+ }
+
+ return nil
+}
+
+// CreateFuturePartitions creates partitions for all parent tables and pegs all timestamps to UTC
+func (m *Manager) CreateFuturePartitions(ctx context.Context, tc Table) error {
+ // Determine start time for new partitions
+ today := m.clock.Now().UTC()
+
+ // get the tenants for this table
+ var tenants []struct {
+ ParentTableId string `db:"parent_table_id"`
+ TenantId string `db:"tenant_id"`
+ }
+ err := m.db.SelectContext(ctx, &tenants, getTenantsQuery, tc.Name, tc.Schema)
+ if err != nil {
+ return fmt.Errorf("failed to fetch tenants: %w", err)
+ }
+
+ for _, te := range tenants {
+ // for each tenant, create the future partitions
+ for i := uint(0); i < tc.PartitionCount; i++ {
+ bounds := Bounds{
+ From: today.Add(time.Duration(i) * tc.PartitionInterval).UTC(),
+ To: today.Add(time.Duration(i+1) * tc.PartitionInterval).UTC(),
+ }
+
+ // Check if partition already exists
+ partitionName := m.generatePartitionName(Tenant{
+ TableName: tc.Name,
+ TableSchema: tc.Schema,
+ TenantId: te.TenantId,
+ }, bounds)
+ exists, innerErr := m.partitionExists(ctx, partitionName, tc.Schema)
+ if innerErr != nil {
+ return fmt.Errorf("failed to check if partition exists: %w", innerErr)
+ }
+
+ if exists {
+ m.logger.Info("partition already exists",
+ "table", tc.Name,
+ "tenant", "",
+ "partition", partitionName,
+ "from", bounds.From,
+ "to", bounds.To)
+ continue
+ }
+
+ tempTenant := Tenant{
+ TableName: tc.Name,
+ TableSchema: tc.Schema,
+ TenantId: te.TenantId,
+ }
+ // Create the partition
+ innerErr = m.createPartition(ctx, tc, tempTenant, bounds)
+ if innerErr != nil {
+ return fmt.Errorf("failed to create future partition: %w", innerErr)
+ }
+
+ m.logger.Info("created future partition",
+ "table", tc.Name,
+ "tenant", "",
+ "partition", partitionName,
+ "from", bounds.From,
+ "to", bounds.To)
+ }
+ }
+
+ return nil
+}
+
+// partitionExists checks if a partition table already exists
+func (m *Manager) partitionExists(ctx context.Context, partitionName, partitionSchemaName string) (bool, error) {
+ var exists bool
+ err := m.db.QueryRowContext(ctx, getPartitionExists, partitionSchemaName, partitionName).Scan(&exists)
+ if err != nil {
+ return false, fmt.Errorf("failed to check partition existence: %w", err)
+ }
+
+ return exists, nil
+}
+
+func (m *Manager) DropOldPartitions(ctx context.Context) error {
+ // Get all managed tables and their retention periods
+ type managedTable struct {
+ TableName string `db:"table_name"`
+ SchemaName string `db:"schema_name"`
+ TenantId string `db:"tenant_id"`
+ RetentionPeriod TimeDuration `db:"retention_period"`
+ }
+
+ var tables []managedTable
+ if err := m.db.SelectContext(ctx, &tables, getManagedTablesRetentionPeriods); err != nil {
+ return fmt.Errorf("failed to fetch managed tables: %w", err)
+ }
+
+ for _, table := range tables {
+ // Find partitions older than the retention period
+ cutoffTime := m.clock.Now().Add(time.Duration(-table.RetentionPeriod))
+ m.logger.Info("dropping old partitions",
+ "cutoff_time", cutoffTime,
+ "table", table.TableName)
+ pattern := fmt.Sprintf("%s_%%", table.TableName)
+ if len(table.TenantId) > 0 {
+ pattern = fmt.Sprintf("%s_%s_%%", table.TableName, table.TenantId)
+ }
+
+ var partitions []string
+ if err := m.db.SelectContext(ctx, &partitions, partitionsQuery, table.SchemaName, pattern); err != nil {
+ return fmt.Errorf("failed to fetch partitions for table %s: %w", table.TableName, err)
+ }
+
+ for _, partition := range partitions {
+ // Extract date from partition name
+ datePart, err := extractDateFromString(partition)
+ if err != nil {
+ return err
+ }
+
+ partitionDate, err := time.Parse(DateNoHyphens, datePart)
+ if err != nil {
+ m.logger.Error("failed to parse partition date",
+ "partition", partition,
+ "error", err)
+ continue
+ }
+
+ // Check if the partition is older than the retention period
+ if partitionDate.Before(cutoffTime) {
+ if m.hook != nil {
+ // run any pre-drop hooks (backup data, upload to object storage)
+ // todo(raymond): pass a context with a deadline to this func
+ if err = m.hook(ctx, partition); err != nil {
+ m.logger.Error("failed to run pre-drop hooks",
+ "partition", partition,
+ "error", err)
+ continue
+ }
+ }
+
+ m.logger.Info("no hook func was specified",
+ "table", table.TableName,
+ "partition", partition,
+ "date", partitionDate)
+
+ // Drop the partition
+ if _, err = m.db.ExecContext(ctx, fmt.Sprintf(dropPartition, table.SchemaName, partition)); err != nil {
+ m.logger.Error("failed to drop partition",
+ "partition", partition,
+ "error", err)
+ continue
+ }
+
+ m.logger.Info("dropped old partition",
+ "table", table.TableName,
+ "partition", partition,
+ "date", partitionDate)
+ }
+ }
+ }
+
+ return nil
+}
+
+// createPartition creates a partition for a table
+func (m *Manager) createPartition(ctx context.Context, tc Table, te Tenant, bounds Bounds) error {
+ // Generate a partition name based on bounds
+ partitionName := m.generatePartitionName(te, bounds)
+
+ // Create SQL for partition
+ pQuery, err := m.generatePartitionSQL(partitionName, tc, te, bounds)
+ if err != nil {
+ return err
+ }
+
+ m.logger.Info(pQuery)
+
+ // Execute partition creation
+ _, err = m.db.ExecContext(ctx, pQuery)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Maintain defines a regularly run maintenance routine
+func (m *Manager) Maintain(ctx context.Context) error {
+ // fetch all tables and run maintenance tasks
+ tables, err := m.GetParentTables(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to get parent tables: %w", err)
+ }
+
+ // Drop old partitions if needed
+ if dropErr := m.DropOldPartitions(ctx); dropErr != nil {
+ return fmt.Errorf("failed to drop old partitions: %w", dropErr)
+ }
+
+ for _, table := range tables {
+ // Check for necessary future partitions
+ if innerErr := m.CreateFuturePartitions(ctx, table); innerErr != nil {
+ return fmt.Errorf("failed to create future partitions: %w", innerErr)
+ }
+ }
+
+ return nil
+}
+
+// generatePartitionSQL generates the name of the partition table
+func (m *Manager) generatePartitionSQL(name string, tc Table, te Tenant, b Bounds) (string, error) {
+ switch tc.PartitionType {
+ case "range":
+ return m.generateRangePartitionSQL(name, te, b), nil
+ case "list", "hash":
+ return "", fmt.Errorf("list and hash partitions are not implemented yet %q", tc.PartitionType)
+ default:
+ return "", fmt.Errorf("unsupported partition type %q", tc.PartitionType)
+ }
+}
+
+func (m *Manager) generateRangePartitionSQL(name string, tc Tenant, b Bounds) string {
+ if len(tc.TenantId) > 0 {
+ return fmt.Sprintf(generatePartitionWithTenantIdQuery,
+ tc.TableSchema, name,
+ tc.TableSchema, tc.TableName,
+ tc.TenantId, b.From.UTC().Format(time.DateOnly),
+ tc.TenantId, b.To.UTC().Format(time.DateOnly))
+ }
+ return fmt.Sprintf(generatePartitionQuery,
+ tc.TableSchema, name,
+ tc.TableSchema, tc.TableName,
+ b.From.UTC().Format(time.DateOnly),
+ b.To.UTC().Format(time.DateOnly))
+}
+
+func (m *Manager) checkTableColumnsExist(ctx context.Context, tc Table, tenantId string) error {
+ if len(tc.TenantIdColumn) > 0 && len(tenantId) > 0 {
+ var exists bool
+ err := m.db.QueryRowxContext(ctx, checkColumnExists, tc.Schema, tc.Name, tc.TenantIdColumn).Scan(&exists)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return fmt.Errorf("table %s does not have a tenant id column", tc.Name)
+ }
+ }
+
+ var exists bool
+ err := m.db.QueryRowxContext(ctx, checkColumnExists, tc.Schema, tc.Name, tc.PartitionBy).Scan(&exists)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return fmt.Errorf("table %s does not have a timestamp column named %s", tc.Name, tc.PartitionBy)
+ }
+
+ return nil
+}
+
+// Update partition name and SQL formatting to use UTC
+func (m *Manager) generatePartitionName(tc Tenant, b Bounds) string {
+ datePart := b.From.UTC().Format(DateNoHyphens)
+
+ if len(tc.TenantId) > 0 {
+ return fmt.Sprintf("%s_%s_%s", tc.TableName, tc.TenantId, datePart)
+ }
+ return fmt.Sprintf("%s_%s", tc.TableName, datePart)
+}
+
+func extractDateFromString(input string) (string, error) {
+ // Regular expression to match exactly 8 digits at the end of the string
+ re, err := regexp.Compile(`(\d{8})$`)
+ if err != nil {
+ return "", err
+ }
+
+ // Find the match
+ matches := re.FindStringSubmatch(input)
+
+ // If a match is found, return it
+ if len(matches) > 1 {
+ return matches[1], nil
+ }
+
+ // Return empty string if no match
+ return "", nil
+}
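+
+// Illustrative example: extractDateFromString("test_results_acme_20240101")
+// returns "20240101"; a name without a trailing 8-digit date returns "" with a
+// nil error.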
+
+// Start begins the maintenance routine
+func (m *Manager) Start(ctx context.Context) error {
+	// A non-positive sample rate disables background maintenance: run a single
+	// maintenance pass and return instead of starting a ticker, since
+	// time.NewTicker panics on a non-positive duration.
+	if m.config.SampleRate <= 0 {
+		if err := m.Maintain(ctx); err != nil {
+			m.logger.Error("an error occurred while running maintenance", "error", err)
+		}
+		return nil
+	}
+
+ m.wg.Add(1)
+ go func() {
+ defer m.wg.Done()
+ ticker := time.NewTicker(m.config.SampleRate)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-m.stop:
+ return
+ case <-ticker.C:
+ if err := m.Maintain(ctx); err != nil {
+ m.logger.Error("an error occurred while running maintenance", "error", err)
+ }
+ }
+ }
+ }()
+ return nil
+}
+
+// Stop gracefully stops the maintenance routine; used for testing
+func (m *Manager) Stop() {
+ close(m.stop)
+ m.wg.Wait()
+}
+
+// generateTableKey creates a unique key for a table based on its name and tenant ID
+func generateTableKey(tableName, tenantID string) string {
+ if tenantID != "" {
+ return fmt.Sprintf("%s_%s", tableName, tenantID)
+ }
+ return tableName
+}
+
+// importExistingPartitions scans the database for existing partitions and adds them to the partition management table
+func (m *Manager) importExistingPartitions(ctx context.Context, tc Table) error {
+ errString := make([]string, 0)
+
+ // Query to get all tables that look like partitions but aren't yet managed
+ type unManagedPartition struct {
+ TenantFrom null.String `db:"tenant_from"`
+ TenantTo null.String `db:"tenant_to"`
+ TimestampFrom string `db:"timestamp_from"`
+ TimestampTo string `db:"timestamp_to"`
+ PartitionName string `db:"partition_name"`
+ PartitionExpr string `db:"partition_expression"`
+ ParentSchema string `db:"parent_schema"`
+ ParentTable string `db:"parent_table"`
+ }
+
+ var unManagedPartitions []unManagedPartition
+ if err := m.db.SelectContext(ctx, &unManagedPartitions, findUnmanagedPartitionsQuery, tc.Schema, tc.Name); err != nil {
+ return fmt.Errorf("failed to query unmanaged partitions: %w", err)
+ }
+
+ // Process unmanaged partitions
+ for _, p := range unManagedPartitions {
+ // Create tenant from imported partition if tenant ID exists
+ if len(p.TenantFrom.String) > 0 {
+ tenant := Tenant{
+ TableName: tc.Name,
+ TableSchema: tc.Schema,
+ TenantId: p.TenantFrom.String,
+ }
+
+ m.logger.Info("creating tenant from imported partition", "table_id", tc.Id)
+
+ // Register the tenant
+ _, err := m.db.ExecContext(ctx, insertTenantSQL, tenant.TenantId, tc.Id)
+ if err != nil {
+ m.logger.Error("failed to register tenant from imported partition",
+ "partition", p.PartitionName,
+ "table", tenant.TableName,
+ "tenant", tenant.TenantId,
+ "error", err)
+ errString = append(errString, err.Error())
+ continue
+ }
+
+ m.logger.Info("registered tenant from imported partition",
+ "table", tenant.TableName,
+ "schema", tenant.TableSchema,
+ "tenant", tenant.TenantId)
+ }
+
+ // check to see if the date part exists
+ datePart, err := extractDateFromString(p.PartitionName)
+ if err != nil {
+ errString = append(errString, err.Error())
+ continue
+ }
+
+ _, err = time.Parse(DateNoHyphens, datePart)
+ if err != nil {
+ errString = append(errString, err.Error())
+ continue
+ }
+
+ parts := strings.Split(p.PartitionName, "_")
+ if len(parts) < 2 {
+ errString = append(errString, fmt.Sprintf("invalid partition name: %s", p.PartitionName))
+ continue
+ }
+
+ from, err := time.Parse(time.RFC3339, p.TimestampFrom)
+ if err != nil {
+ errString = append(errString, fmt.Sprintf("failed to parse from timestamp: %v", err))
+ continue
+ }
+
+ to, err := time.Parse(time.RFC3339, p.TimestampTo)
+ if err != nil {
+ errString = append(errString, fmt.Sprintf("failed to parse to timestamp: %v", err))
+ continue
+ }
+
+ partition := Partition{
+ Name: p.PartitionName,
+ Bounds: Bounds{From: from, To: to},
+ TenantId: p.TenantFrom.String,
+ ParentTable: tc,
+ }
+
+ err = m.checkTableColumnsExist(ctx, tc, p.TenantFrom.String)
+ if err != nil {
+ errString = append(errString, err.Error())
+ continue
+ }
+
+ mTable := partition.toManagedTable()
+
+ // Insert into partition management table
+ res, err := m.db.ExecContext(ctx, upsertSQL,
+ ulid.Make().String(),
+ tc.Id,
+ mTable.TenantID,
+ mTable.PartitionBy,
+ mTable.PartitionType,
+ mTable.PartitionBoundsFrom,
+ mTable.PartitionBoundsTo,
+ )
+ if err != nil {
+ m.logger.Error("failed to insert management entry",
+ "partition", p.PartitionName,
+ "table", p.ParentTable,
+ "tenant", p.TenantFrom,
+ "error", err)
+ errString = append(errString, err.Error())
+ continue
+ }
+
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+			m.logger.Error("failed to get rows affected",
+ "partition", p.PartitionName,
+ "table", p.ParentTable,
+ "tenant", p.TenantFrom,
+ "error", err)
+ errString = append(errString, err.Error())
+ continue
+ }
+
+ if rowsAffected > 0 {
+			m.logger.Info("imported existing partitioned table",
+				"partition", p.PartitionName,
+				"table", p.ParentTable,
+				"tenant", p.TenantFrom)
+
+ // Add to our map of unique tables
+ key := buildTableName(mTable.SchemaName, mTable.TableName, mTable.TenantID)
+ m.addToPartitionMap(key, partition)
+ }
+ }
+
+ if len(errString) > 0 {
+ return errors.New(strings.Join(errString, "; "))
+ }
+ return nil
+}
+
+func (m *Manager) GetManagedTables(ctx context.Context) ([]uiManagedTableInfo, error) {
+ var tables []uiManagedTableInfo
+ err := m.db.SelectContext(ctx, &tables, getManagedTablesListQuery)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get managed tables: %w", err)
+ }
+ return tables, nil
+}
+
+func (m *Manager) GetPartitions(ctx context.Context, schema, tableName string, limit, offset int) ([]uiPartitionInfo, error) {
+	// getPartitionDetailsQuery appends the "_%" suffix to the table name itself,
+	// so pass the bare table name rather than a pre-built LIKE pattern.
+	var partitions []uiPartitionInfo
+	err := m.db.SelectContext(ctx, &partitions, getPartitionDetailsQuery, schema, tableName, limit, offset)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get partitions: %w", err)
+	}
+	return partitions, nil
+}
+
+func (m *Manager) GetParentTableInfo(ctx context.Context, schema, tableName string) (*uiParentTableInfo, error) {
+ var info uiParentTableInfo
+ err := m.db.GetContext(ctx, &info, getParentTableInfoQuery, schema, tableName)
+ if err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+// CreateParentTable registers a parent table for partitioning
+func (m *Manager) CreateParentTable(ctx context.Context, table Table) (string, error) {
+ if err := table.Validate(); err != nil {
+ return "", fmt.Errorf("invalid parent table configuration: %w", err)
+ }
+
+ // Check if table columns exist
+ if err := m.checkTableColumnsExist(ctx, table, ""); err != nil {
+ return "", fmt.Errorf("table validation failed: %w", err)
+ }
+
+ parentTableId := struct {
+ Id string `db:"id"`
+ }{}
+ // Insert or update parent table configuration
+ err := m.db.QueryRowxContext(ctx, upsertParentTableSQL,
+ ulid.Make().String(),
+ table.Name,
+ table.Schema,
+ table.TenantIdColumn,
+ table.PartitionBy,
+ table.PartitionType,
+ table.PartitionInterval.String(),
+ table.PartitionCount,
+ table.RetentionPeriod.String(),
+ ).StructScan(&parentTableId)
+ if err != nil {
+ return "", fmt.Errorf("failed to upsert parent table config for %s: %w", table.Name, err)
+ }
+
+ m.logger.Info("created parent table", "table", table.Name, "schema", table.Schema)
+
+ return parentTableId.Id, nil
+}
+
+// RegisterTenant registers a tenant for an existing parent table
+func (m *Manager) RegisterTenant(ctx context.Context, tenant Tenant) (*TenantRegistrationResult, error) {
+ if err := tenant.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid tenant configuration: %w", err)
+ }
+
+ result := &TenantRegistrationResult{
+ TenantId: tenant.TenantId,
+ TableName: tenant.TableName,
+ TableSchema: tenant.TableSchema,
+ PartitionsCreated: 0,
+ ExistingPartitionsImported: 0,
+ Errors: []error{},
+ }
+
+ // Get parent table configuration
+ var parentTableData struct {
+ Id string `db:"id"`
+ TableName string `db:"table_name"`
+ SchemaName string `db:"schema_name"`
+ TenantColumn string `db:"tenant_column"`
+ PartitionBy string `db:"partition_by"`
+ PartitionType string `db:"partition_type"`
+ PartitionInterval string `db:"partition_interval"`
+ PartitionCount uint `db:"partition_count"`
+ RetentionPeriod string `db:"retention_period"`
+ }
+
+ err := m.db.GetContext(ctx, &parentTableData, getParentTableQuery, tenant.TableName, tenant.TableSchema)
+ if err != nil {
+ result.Errors = append(result.Errors, fmt.Errorf("parent table not found: %w", err))
+ return result, nil
+ }
+
+ // Convert to Table
+ interval, err := time.ParseDuration(parentTableData.PartitionInterval)
+ if err != nil {
+ result.Errors = append(result.Errors, fmt.Errorf("failed to parse partition interval: %w", err))
+ return result, nil
+ }
+
+ retention, err := time.ParseDuration(parentTableData.RetentionPeriod)
+ if err != nil {
+ result.Errors = append(result.Errors, fmt.Errorf("failed to parse retention period: %w", err))
+ return result, nil
+ }
+
+ parentTable := Table{
+ Id: parentTableData.Id,
+ Name: parentTableData.TableName,
+ Schema: parentTableData.SchemaName,
+ TenantIdColumn: parentTableData.TenantColumn,
+ PartitionBy: parentTableData.PartitionBy,
+ PartitionType: PartitionerType(parentTableData.PartitionType),
+ PartitionInterval: interval,
+ PartitionCount: parentTableData.PartitionCount,
+ RetentionPeriod: retention,
+ }
+
+ // Insert tenant
+ _, err = m.db.ExecContext(ctx, insertTenantSQL,
+ tenant.TenantId,
+ parentTable.Id,
+ )
+ if err != nil {
+ result.Errors = append(result.Errors, fmt.Errorf("failed to insert tenant: %w", err))
+ return result, nil
+ }
+
+ // Create future partitions
+ if err = m.CreateFuturePartitions(ctx, parentTable); err != nil {
+ result.Errors = append(result.Errors, fmt.Errorf("failed to create future partitions: %w", err))
+ } else {
+ result.PartitionsCreated = int(parentTable.PartitionCount)
+ }
+
+ // Import existing partitions
+ if err = m.importExistingPartitions(ctx, parentTable); err != nil {
+ result.Errors = append(result.Errors, fmt.Errorf("failed to import existing partitions: %w", err))
+ } else {
+ // Count existing partitions (this is a rough estimate)
+ result.ExistingPartitionsImported = 1 // We'll improve this later
+ }
+
+ m.logger.Info("registered tenant", "tenant", tenant.TenantId, "table", tenant.TableName)
+ return result, nil
+}
+
+// RegisterTenants registers multiple tenants for an existing parent table (new API)
+func (m *Manager) RegisterTenants(ctx context.Context, tenants ...Tenant) ([]TenantRegistrationResult, error) {
+ tx, err := m.db.BeginTxx(ctx, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to start transaction: %w", err)
+ }
+ defer tx.Rollback()
+
+ results := make([]TenantRegistrationResult, 0, len(tenants))
+
+ for _, tenant := range tenants {
+ // TODO: pass the db transaction via the context
+ result, err := m.RegisterTenant(ctx, tenant)
+ if err != nil {
+ return results, err
+ }
+ results = append(results, *result)
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, fmt.Errorf("failed to commit transaction: %w", err)
+ }
+
+ return results, nil
+}
+
+// GetParentTables returns all registered parent tables
+func (m *Manager) GetParentTables(ctx context.Context) ([]Table, error) {
+ var parentTables []struct {
+ TableName string `db:"table_name"`
+ SchemaName string `db:"schema_name"`
+ TenantColumn string `db:"tenant_column"`
+ PartitionBy string `db:"partition_by"`
+ PartitionType string `db:"partition_type"`
+ PartitionInterval string `db:"partition_interval"`
+ PartitionCount uint `db:"partition_count"`
+ RetentionPeriod string `db:"retention_period"`
+ }
+
+ err := m.db.SelectContext(ctx, &parentTables, getParentTablesQuery)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get parent tables: %w", err)
+ }
+
+ result := make([]Table, 0, len(parentTables))
+ for _, pt := range parentTables {
+ interval, innerErr := time.ParseDuration(pt.PartitionInterval)
+ if innerErr != nil {
+ m.logger.Error("failed to parse partition interval", "error", innerErr, "table", pt.TableName)
+ continue
+ }
+
+ retention, innerErr := time.ParseDuration(pt.RetentionPeriod)
+ if innerErr != nil {
+ m.logger.Error("failed to parse retention period", "error", innerErr, "table", pt.TableName)
+ continue
+ }
+
+ result = append(result, Table{
+ Name: pt.TableName,
+ Schema: pt.SchemaName,
+ TenantIdColumn: pt.TenantColumn,
+ PartitionBy: pt.PartitionBy,
+ PartitionType: PartitionerType(pt.PartitionType),
+ PartitionInterval: interval,
+ PartitionCount: pt.PartitionCount,
+ RetentionPeriod: retention,
+ })
+ }
+
+ return result, nil
+}
+
+// GetTenants returns all tenants for a specific parent table
+func (m *Manager) GetTenants(ctx context.Context, parentTableSchema, parentTableName string) ([]Tenant, error) {
+ var tenants []struct {
+ ParentTableId string `db:"parent_table_id"`
+ TenantId string `db:"tenant_id"`
+ }
+
+ err := m.db.SelectContext(ctx, &tenants, getTenantsQuery, parentTableName, parentTableSchema)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch tenants: %w", err)
+ }
+
+ m.logger.Info("fetched tenants", "table", parentTableName, "schema", parentTableSchema, "count", len(tenants))
+
+ result := make([]Tenant, 0, len(tenants))
+ for _, t := range tenants {
+ result = append(result, Tenant{
+ TableName: parentTableName,
+ TableSchema: parentTableSchema,
+ TenantId: t.TenantId,
+ })
+ }
+
+ return result, nil
+}
+
+func (m *Manager) addToPartitionMap(key tableName, partition Partition) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.partitions[key] = partition
+}
+
+func (m *Manager) removePartitionFromMap(key tableName) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ delete(m.partitions, key)
+}
diff --git a/vendor/github.com/jirevwe/go_partman/queries.go b/vendor/github.com/jirevwe/go_partman/queries.go
new file mode 100644
index 0000000000..ee6ce3aa93
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/queries.go
@@ -0,0 +1,283 @@
+package partman
+
+var createPartmanSchema = `CREATE SCHEMA IF NOT EXISTS partman;`
+
+var createParentsTable = `
+CREATE TABLE IF NOT EXISTS partman.parent_tables (
+ id VARCHAR PRIMARY KEY,
+ schema_name VARCHAR NOT NULL,
+ table_name VARCHAR NOT NULL,
+ tenant_column VARCHAR,
+ partition_by VARCHAR NOT NULL,
+ partition_type VARCHAR NOT NULL,
+ partition_interval VARCHAR NOT NULL,
+ partition_count INT NOT NULL DEFAULT 10,
+ retention_period VARCHAR NOT NULL,
+ created_at timestamptz DEFAULT CURRENT_TIMESTAMP,
+ updated_at timestamptz DEFAULT CURRENT_TIMESTAMP,
+ UNIQUE(schema_name, table_name)
+);`
+
+var createTenantsTable = `
+CREATE TABLE IF NOT EXISTS partman.tenants (
+ id VARCHAR NOT NULL,
+ parent_table_id VARCHAR NOT NULL,
+ created_at timestamptz DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY (parent_table_id, id),
+ FOREIGN KEY (parent_table_id)
+ REFERENCES partman.parent_tables(id) ON DELETE CASCADE,
+ UNIQUE(parent_table_id, id)
+);`
+
+var createPartitionsTable = `
+CREATE TABLE IF NOT EXISTS partman.partitions (
+ id VARCHAR PRIMARY KEY,
+ parent_table_id VARCHAR NOT NULL,
+ tenant_id VARCHAR,
+ partition_by VARCHAR NOT NULL,
+ partition_type VARCHAR NOT NULL,
+ partition_bounds_from TIMESTAMPTZ NOT NULL,
+ partition_bounds_to TIMESTAMPTZ NOT NULL,
+ created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (parent_table_id)
+ REFERENCES partman.parent_tables(id) ON DELETE CASCADE,
+ FOREIGN KEY (parent_table_id, tenant_id)
+ REFERENCES partman.tenants(parent_table_id, id) ON DELETE CASCADE,
+ UNIQUE(parent_table_id, tenant_id, partition_bounds_from, partition_bounds_to)
+);`
+
+var createValidateTenantFunction = `
+CREATE OR REPLACE FUNCTION partman.validate_tenant_id() RETURNS TRIGGER AS $$
+BEGIN
+ -- If tenant_id is provided, ensure it exists for this parent table
+ IF NEW.tenant_id IS NOT NULL THEN
+ IF NOT EXISTS (
+ SELECT 1 FROM partman.tenants
+ WHERE parent_table_id = NEW.parent_table_id
+ AND id = NEW.tenant_id
+ ) THEN
+ RAISE EXCEPTION 'Tenant % does not exist for parent table %',
+ NEW.tenant_id, NEW.parent_table_id;
+ END IF;
+ END IF;
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;`
+
+var createTriggerOnPartitionInsert = `
+CREATE OR REPLACE TRIGGER validate_tenant_id_trigger
+ BEFORE INSERT OR UPDATE ON partman.partitions
+ FOR EACH ROW EXECUTE FUNCTION partman.validate_tenant_id()`
+
+var upsertSQL = `
+INSERT INTO partman.partitions (
+ id, parent_table_id, tenant_id, partition_by, partition_type,
+ partition_bounds_from, partition_bounds_to
+) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ON CONFLICT DO NOTHING;`
+
+var getlatestPartition = `
+SELECT tablename
+FROM pg_tables
+WHERE schemaname = $1 AND tablename LIKE $2
+ORDER BY tablename DESC
+LIMIT 1;`
+
+// todo(raymond): paginate this query?
+var getManagedTablesRetentionPeriods = `
+SELECT pt.table_name, pt.schema_name, p.tenant_id, pt.retention_period
+FROM partman.partitions p
+join partman.parent_tables pt on pt.id = p.parent_table_id;`
+
+var getPartitionExists = `
+SELECT EXISTS (
+ SELECT 1
+ FROM pg_tables
+ WHERE schemaname = $1 AND tablename = $2
+);`
+
+var partitionsQuery = `
+SELECT tablename
+FROM pg_tables
+WHERE schemaname = $1 AND tablename ILIKE $2;`
+
+var dropPartition = `DROP TABLE IF EXISTS %s.%s;`
+
+var generatePartitionQuery = `CREATE TABLE IF NOT EXISTS %s.%s PARTITION OF %s.%s FOR VALUES FROM ('%s 00:00:00+00'::timestamptz) TO ('%s 00:00:00+00'::timestamptz);`
+
+var generatePartitionWithTenantIdQuery = `CREATE TABLE IF NOT EXISTS %s.%s PARTITION OF %s.%s FOR VALUES FROM ('%s', '%s 00:00:00+00'::timestamptz) TO ('%s', '%s 00:00:00+00'::timestamptz);`
+
+var checkColumnExists = `
+SELECT EXISTS (SELECT 1
+FROM information_schema.columns
+WHERE table_schema=$1 AND table_name=$2 AND column_name=$3);`
+
+var getManagedTablesQuery = `
+SELECT
+ pt.table_name,
+ pt.schema_name,
+ p.tenant_id,
+ pt.tenant_column,
+ pt.partition_by,
+ pt.partition_type,
+ p.partition_bounds_from,
+ p.partition_bounds_to
+FROM partman.partitions p
+join partman.parent_tables pt on p.parent_table_id = pt.id;`
+
+var getPartitionDetailsQuery = `
+WITH partition_info AS (
+ SELECT
+ t.tablename as name,
+ pg_total_relation_size(quote_ident($1) || '.' || quote_ident(t.tablename)) as size_bytes,
+ (SELECT reltuples::bigint FROM pg_class WHERE oid = (quote_ident($1) || '.' || quote_ident(t.tablename))::regclass) as rows,
+ p.created_at as created
+ FROM pg_tables t
+ LEFT JOIN partman.parent_tables pt ON pt.schema_name = t.schemaname AND pt.table_name = $2
+ LEFT JOIN partman.partitions p ON p.parent_table_id = pt.id
+ WHERE t.schemaname = $1 AND t.tablename LIKE $2 || '_%'
+ ORDER BY t.tablename DESC
+ LIMIT $3 OFFSET $4
+)
+SELECT
+ name,
+ pg_size_pretty(size_bytes) as size,
+ rows,
+ COALESCE(to_char(created, 'YYYY-MM-DD HH24:MI:SS'), '') as created,
+ size_bytes,
+ (SELECT COUNT(*) FROM pg_tables WHERE schemaname = $1 AND tablename LIKE $2 || '_%') as total_count
+FROM partition_info;`
+
+var getParentTableInfoQuery = `
+WITH parent_table_info AS (
+ SELECT
+ schemaname,
+ tablename,
+ (SELECT COUNT(*) FROM pg_tables WHERE schemaname = $1 AND tablename LIKE $2 || '_%') as partition_count
+ FROM pg_tables
+ WHERE schemaname = $1 AND tablename = $2
+),
+partition_sizes AS (
+ SELECT
+ schemaname,
+ tablename,
+ pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(tablename)) as size_bytes,
+ (SELECT reltuples::bigint FROM pg_class WHERE oid = (quote_ident(schemaname) || '.' || quote_ident(tablename))::regclass) as estimated_rows
+ FROM pg_tables
+ WHERE schemaname = $1 AND tablename LIKE $2 || '_%'
+),
+totals AS (
+ SELECT
+ COALESCE(SUM(size_bytes), 0) as total_size_bytes,
+ COALESCE(SUM(estimated_rows), 0) as total_rows
+ FROM partition_sizes
+)
+SELECT
+ pti.tablename as name,
+ pg_size_pretty(t.total_size_bytes) as total_size,
+ t.total_rows as total_rows,
+ pti.partition_count as partition_count,
+ t.total_size_bytes as total_size_bytes
+FROM parent_table_info pti
+CROSS JOIN totals t;`
+
+var getManagedTablesListQuery = `
+SELECT DISTINCT table_name, schema_name
+FROM partman.parent_tables
+ORDER BY table_name;`
+
+var upsertParentTableSQL = `
+INSERT INTO partman.parent_tables (
+ id, table_name, schema_name,
+ tenant_column, partition_by,
+ partition_type, partition_interval,
+ partition_count, retention_period
+) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+ON CONFLICT (schema_name, table_name)
+DO UPDATE SET updated_at = current_timestamp
+RETURNING id;`
+
+var insertTenantSQL = `
+INSERT INTO partman.tenants (id, parent_table_id)
+VALUES ($1, $2)
+ON CONFLICT DO NOTHING;`
+
+var getParentTablesQuery = `
+SELECT
+ table_name,
+ schema_name,
+ tenant_column,
+ partition_by,
+ partition_type,
+ partition_interval,
+ partition_count,
+ retention_period
+FROM partman.parent_tables
+ORDER BY table_name;`
+
+var getTenantsQuery = `
+SELECT t.id as tenant_id, pt.id as parent_table_id
+FROM partman.tenants t
+JOIN partman.parent_tables pt ON pt.id = t.parent_table_id
+WHERE pt.table_name = $1 AND pt.schema_name = $2
+ORDER BY t.id;`
+
+var getParentTableQuery = `
+SELECT
+ id,
+ table_name,
+ schema_name,
+ tenant_column,
+ partition_by,
+ partition_type,
+ partition_interval,
+ partition_count,
+ retention_period
+FROM partman.parent_tables
+WHERE table_name = $1 AND schema_name = $2;`
+
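+// findUnmanagedPartitionsQuery lists the child partitions of a parent table and
+// parses their bounds out of pg_get_expr(relpartbound, oid), whose output for a
+// range partition looks roughly like (values are illustrative):
+//
+//	FOR VALUES FROM ('2024-01-01 00:00:00+00') TO ('2024-02-01 00:00:00+00')
+//
+// or, when the partition key also includes a tenant column:
+//
+//	FOR VALUES FROM ('acme', '2024-01-01 00:00:00+00') TO ('acme', '2024-02-01 00:00:00+00')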
+var findUnmanagedPartitionsQuery = `
+WITH bounds AS (
+SELECT
+ nmsp_parent.nspname AS parent_schema,
+ parent.relname AS parent_table,
+ nmsp_child.nspname AS partition_schema,
+ child.relname AS partition_name,
+ pg_get_expr(child.relpartbound, child.oid) AS partition_expression
+FROM pg_inherits
+ JOIN pg_class parent ON pg_inherits.inhparent = parent.oid
+ JOIN pg_class child ON pg_inherits.inhrelid = child.oid
+ JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace
+ JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace
+WHERE parent.relkind = 'p' AND nmsp_parent.nspname = $1 and parent.relname = $2
+),
+ parsed_values AS (
+ SELECT
+ *,
+ regexp_matches(partition_expression, 'FROM \(([^)]+)\) TO \(([^)]+)\)', 'g') as extracted_values,
+ (regexp_matches(partition_expression, 'FROM \(([^)]+)\)', 'g'))[1] as from_values,
+ (regexp_matches(partition_expression, 'TO \(([^)]+)\)', 'g'))[1] as to_values
+ FROM bounds
+ )
+SELECT
+ parent_schema,
+ parent_table,
+ partition_name,
+ partition_expression,
+ CASE
+ WHEN from_values LIKE '%,%' THEN replace(split_part(from_values, ', ', 1), '''', '')
+ END as tenant_from,
+ (CASE
+ WHEN from_values LIKE '%,%' THEN split_part(from_values, ', ', 2)
+ ELSE from_values
+ END)::TIMESTAMP as timestamp_from,
+ CASE
+ WHEN to_values LIKE '%,%' THEN replace(split_part(to_values, ', ', 1), '''', '')
+ END as tenant_to,
+ (CASE
+ WHEN to_values LIKE '%,%' THEN split_part(to_values, ', ', 2)
+ ELSE to_values
+ END)::TIMESTAMP as timestamp_to
+FROM parsed_values;
+`
diff --git a/vendor/github.com/jirevwe/go_partman/type.go b/vendor/github.com/jirevwe/go_partman/type.go
new file mode 100644
index 0000000000..009ee375f2
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/type.go
@@ -0,0 +1,394 @@
+package partman
+
+import (
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+)
+
+type tableName string
+
+func buildTableName(schema string, table string, tenantId string) tableName {
+ if schema == "" {
+ schema = "public"
+ }
+	if tenantId != "" {
+ return tableName(fmt.Sprintf("%s.%s_%s", schema, table, tenantId))
+ }
+ return tableName(fmt.Sprintf("%s.%s", schema, table))
+}
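+
+// Illustrative example (names are assumed): buildTableName("public", "test_results", "acme")
+// returns "public.test_results_acme"; with an empty tenant ID it returns "public.test_results".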
+
+var (
+ ErrHookMustNotBeNil = errors.New("[partition manager] hook must not be nil")
+ ErrClockMustNotBeNil = errors.New("[partition manager] clock must not be nil")
+ ErrLoggerMustNotBeNil = errors.New("[partition manager] logger must not be nil")
+ ErrConfigMustNotBeNil = errors.New("[partition manager] config must not be nil")
+ ErrDbDriverMustNotBeNil = errors.New("[partition manager] db driver must not be nil")
+)
+
+// Hook is a function that executes any necessary operations before a partition is dropped.
+// Example hooks:
+// 1. Export data to cold storage
+// 2. Create backup
+// 3. Send notifications
+// 4. Update metrics
+type Hook func(ctx context.Context, partition string) error
+
+type Option func(m *Manager) error
+
+// WithDB function to set the database
+func WithDB(db *sqlx.DB) Option {
+ return func(m *Manager) error {
+ if db == nil {
+ return ErrDbDriverMustNotBeNil
+ }
+
+ m.db = db
+ return nil
+ }
+}
+
+// WithLogger function to set the logger
+func WithLogger(logger Logger) Option {
+ return func(m *Manager) error {
+ if logger == nil {
+ return ErrLoggerMustNotBeNil
+ }
+
+ m.logger = logger
+ return nil
+ }
+}
+
+// WithConfig function to set the config
+func WithConfig(config *Config) Option {
+ return func(m *Manager) error {
+ if config == nil {
+ return ErrConfigMustNotBeNil
+ }
+
+ if err := config.Validate(); err != nil {
+ return err
+ }
+
+ m.config = config
+ return nil
+ }
+}
+
+// WithClock function to set the clock
+func WithClock(clock Clock) Option {
+ return func(m *Manager) error {
+ if clock == nil {
+ return ErrClockMustNotBeNil
+ }
+
+ m.clock = clock
+ return nil
+ }
+}
+
+// WithHook function to set the hook
+func WithHook(hook Hook) Option {
+ return func(m *Manager) error {
+ if hook == nil {
+ return ErrHookMustNotBeNil
+ }
+
+ m.hook = hook
+ return nil
+ }
+}
+
+type TimeDuration time.Duration
+
+func (t *TimeDuration) Scan(value interface{}) error {
+ s, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("unsupported value type %T", value)
+ }
+
+ td, err := time.ParseDuration(s)
+ if err != nil {
+ return err
+ }
+
+ *t = TimeDuration(td)
+
+ return nil
+}
+
+func (t TimeDuration) Value() (driver.Value, error) {
+ duration := time.Duration(t)
+ if duration == 0 {
+ return nil, nil
+ }
+ return duration.String(), nil
+}
+
+type PartitionerType string
+
+const (
+ TypeRange PartitionerType = "range"
+)
+
+const (
+ DateNoHyphens = "20060102"
+)
+
+type Partitioner interface {
+ // CreateFuturePartitions Create new partitions ahead of time
+ CreateFuturePartitions(ctx context.Context, tableConfig Table) error
+
+ // DropOldPartitions Drop old partitions based on retention policy
+ DropOldPartitions(ctx context.Context) error
+
+ // Maintain defines a regularly run maintenance routine
+ Maintain(ctx context.Context) error
+
+ // AddManagedTable adds a new managed table to the partition manager
+ AddManagedTable(tc Table) error
+
+ // importExistingPartitions scans the database for existing partitions and adds them to the partition management table
+ importExistingPartitions(ctx context.Context, tc Table) error
+
+ // CreateParentTable registers a parent table for partitioning (new API)
+	CreateParentTable(ctx context.Context, parentTable Table) (string, error)
+
+ // RegisterTenant registers a tenant for an existing parent table (new API)
+ RegisterTenant(ctx context.Context, tenant Tenant) (*TenantRegistrationResult, error)
+
+ // RegisterTenants registers multiple tenants for an existing parent table (new API)
+	RegisterTenants(ctx context.Context, tenants ...Tenant) ([]TenantRegistrationResult, error)
+
+ // GetParentTables returns all registered parent tables
+ GetParentTables(ctx context.Context) ([]Table, error)
+
+ // GetTenants returns all tenants for a specific parent table
+	GetTenants(ctx context.Context, parentTableSchema, parentTableName string) ([]Tenant, error)
+}
+
+type Bounds struct {
+ From, To time.Time
+}
+
+type D struct {
+ Key string
+ Value string
+}
+
+type Partition struct {
+ // Name the name of the partition within the partitioned table.
+ Name string
+
+ // ParentTable the metadata and configuration of the parent table for the partition.
+ ParentTable Table
+
+ // Bounds the range of time covered by the partition, defined by start (From) and end (To) timestamps.
+ Bounds Bounds
+
+ // TenantId the tenant ID column value (e.g., 01J2V010NV1259CYWQEYQC8F35)
+ TenantId string
+}
+
+func (p *Partition) toManagedTable() managedTable {
+ return managedTable{
+ TableName: p.Name,
+ SchemaName: p.ParentTable.Schema,
+ TenantID: p.TenantId,
+ TenantColumn: p.ParentTable.TenantIdColumn,
+ PartitionBy: p.ParentTable.PartitionBy,
+ PartitionType: string(p.ParentTable.PartitionType),
+ PartitionBoundsFrom: p.Bounds.From,
+ PartitionBoundsTo: p.Bounds.To,
+ }
+}
+
+type Table struct {
+ Id string
+
+ // Name of the partitioned table
+ Name string
+
+ // Schema of the partitioned table
+ Schema string
+
+ // TenantIdColumn Tenant ID column to partition by (e.g., tenant_id)
+ TenantIdColumn string
+
+ // PartitionBy Timestamp column to partition by (e.g., created_at)
+ PartitionBy string
+
+ // PartitionType Postgres partition type
+ PartitionType PartitionerType // "range", "list", or "hash"
+
+	// PartitionInterval is the width of each range partition, as a Go duration (e.g. 24 * time.Hour for daily partitions)
+ PartitionInterval time.Duration
+
+ // PartitionCount is the number of partitions a table will have; defaults to 10
+ PartitionCount uint
+
+	// RetentionPeriod is how long partitions are kept before being dropped, as a Go duration (e.g. 720 * time.Hour for roughly 30 days)
+ RetentionPeriod time.Duration
+}
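+
+// Illustrative example (table and column names are assumed): a daily
+// range-partitioned table retained for roughly 30 days could be described as:
+//
+//	Table{
+//		Name:              "test_results",
+//		Schema:            "public",
+//		PartitionBy:       "created_at",
+//		PartitionType:     TypeRange,
+//		PartitionInterval: 24 * time.Hour,
+//		PartitionCount:    10,
+//		RetentionPeriod:   30 * 24 * time.Hour,
+//	}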
+
+type Config struct {
+ // SampleRate is how often the internal ticker runs
+ SampleRate time.Duration
+
+ // Tables holds parent table configurations
+ Tables []Table
+}
+
+// Validate checks if the configuration is valid
+func (c *Config) Validate() error {
+ // Validate legacy Tables API
+ for i, table := range c.Tables {
+ if err := table.Validate(); err != nil {
+ return fmt.Errorf("table[%d]: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+// Validate checks if the table configuration is valid
+func (tc *Table) Validate() error {
+ if tc.Name == "" {
+ return errors.New("name cannot be empty")
+ }
+
+ if tc.Schema == "" {
+ return errors.New("schema name cannot be empty")
+ }
+
+ if tc.RetentionPeriod == 0 {
+ return errors.New("retention period must be set")
+ }
+
+ // set default value
+ if tc.PartitionCount == 0 {
+ tc.PartitionCount = 10
+ }
+
+ if tc.PartitionType == TypeRange {
+ if len(tc.PartitionBy) == 0 {
+ return errors.New("partition_by is required for range partitions")
+ }
+
+ if tc.PartitionInterval == 0 {
+ return errors.New("partition interval must be set for range partitions")
+ }
+ }
+
+ return nil
+}
+
+type StringArray []string
+
+func (a *StringArray) Scan(src interface{}) error {
+ if src == nil {
+ return nil
+ }
+
+ var array []string
+
+ switch v := src.(type) {
+ case string:
+ array = append(array, v)
+ case []string:
+ array = v
+ }
+
+ *a = array
+
+ return nil
+}
+
+type managedTable struct {
+ TableName string `db:"table_name"`
+ SchemaName string `db:"schema_name"`
+ TenantID string `db:"tenant_id"`
+ TenantColumn string `db:"tenant_column"`
+ PartitionBy string `db:"partition_by"`
+ PartitionType string `db:"partition_type"`
+ PartitionBoundsFrom time.Time `db:"partition_bounds_from"`
+ PartitionBoundsTo time.Time `db:"partition_bounds_to"`
+}
+
+type uiPartitionInfo struct {
+ Name string `json:"name" db:"name"`
+ Size string `json:"size" db:"size"`
+ Rows int64 `json:"rows" db:"rows"`
+ Range string `json:"range" db:"range"`
+ Created string `json:"created" db:"created"`
+ SizeBytes int64 `json:"size_bytes" db:"size_bytes"`
+ TotalCount int64 `json:"total_count" db:"total_count"`
+}
+
+type uiParentTableInfo struct {
+ Name string `json:"name" db:"name"`
+ TotalSize string `json:"total_size" db:"total_size"`
+ TotalRows int64 `json:"total_rows" db:"total_rows"`
+ PartitionCount int `json:"partition_count" db:"partition_count"`
+ TotalSizeBytes int64 `json:"total_size_bytes" db:"total_size_bytes"`
+}
+
+type uiManagedTableInfo struct {
+ Name string `json:"name" db:"table_name"`
+ Schema string `json:"schema" db:"schema_name"`
+}
+
+// Tenant represents a tenant configuration for a specific parent table
+type Tenant struct {
+	// TableName is the name of the parent table this tenant belongs to
+ TableName string
+
+	// TableSchema is the schema of the parent table
+ TableSchema string
+
+ // TenantId Tenant ID column value (e.g., 01J2V010NV1259CYWQEYQC8F35)
+ TenantId string
+}
+
+// TenantRegistrationResult contains the result of registering a tenant
+type TenantRegistrationResult struct {
+ // TenantId the tenant ID that was registered
+ TenantId string
+
+ // TableName the parent table name
+ TableName string
+
+ // TableSchema the parent table schema
+ TableSchema string
+
+ // PartitionsCreated number of partitions created for this tenant
+ PartitionsCreated int
+
+ // ExistingPartitionsImported number of existing partitions imported
+ ExistingPartitionsImported int
+
+ // Errors any errors encountered during registration
+ Errors []error
+}
+
+// Validate checks if the tenant configuration is valid
+func (t *Tenant) Validate() error {
+ if t.TableSchema == "" {
+ return errors.New("table schema cannot be empty")
+ }
+
+ if t.TableName == "" {
+ return errors.New("table name cannot be empty")
+ }
+
+ if t.TenantId == "" {
+ return errors.New("tenant ID cannot be empty")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jirevwe/go_partman/ui.go b/vendor/github.com/jirevwe/go_partman/ui.go
new file mode 100644
index 0000000000..ba55936c26
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/ui.go
@@ -0,0 +1,313 @@
+package partman
+
+import (
+ "embed"
+	"encoding/json"
+	"io"
+	"io/fs"
+ "mime"
+ "net/http"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+//go:embed web/dist
+var uiFS embed.FS
+
+type apiHandler struct {
+ manager *Manager
+}
+
+func (h *apiHandler) handleGetTables(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ tables, err := h.manager.GetManagedTables(r.Context())
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ err = json.NewEncoder(w).Encode(map[string]interface{}{
+ "tables": tables,
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+}
+
+func (h *apiHandler) handleGetPartitions(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ tableName := r.URL.Query().Get("table")
+ if tableName == "" {
+ http.Error(w, "table parameter is required", http.StatusBadRequest)
+ return
+ }
+
+ schema := r.URL.Query().Get("schema")
+ if schema == "" {
+ // Default to the first table's schema if not provided
+ if len(h.manager.config.Tables) > 0 {
+ schema = h.manager.config.Tables[0].Schema
+ } else {
+ http.Error(w, "schema parameter is required", http.StatusBadRequest)
+ return
+ }
+ }
+
+ // Parse pagination parameters
+ limit := 10 // Default limit
+ offset := 0 // Default offset
+
+ if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
+ parsedLimit, err := strconv.Atoi(limitStr)
+ if err != nil {
+ http.Error(w, "invalid limit parameter", http.StatusBadRequest)
+ return
+ }
+ limit = parsedLimit
+ }
+
+ if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" {
+ parsedOffset, err := strconv.Atoi(offsetStr)
+ if err != nil {
+ http.Error(w, "invalid offset parameter", http.StatusBadRequest)
+ return
+ }
+ offset = parsedOffset
+ }
+
+ partitions, err := h.manager.GetPartitions(r.Context(), schema, tableName, limit, offset)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ // Get parent table info
+ parentInfo, err := h.manager.GetParentTableInfo(r.Context(), schema, tableName)
+ if err != nil {
+ // Log error but don't fail the request
+ h.manager.logger.Error("failed to get parent table info", "error", err)
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ response := map[string]interface{}{
+ "partitions": partitions,
+ }
+ if parentInfo != nil {
+ response["parent_table"] = parentInfo
+ }
+
+ err = json.NewEncoder(w).Encode(response)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+}
+
+// UIHandler returns an http.Handler that serves the partition manager UI and API
+func UIHandler(manager *Manager) http.Handler {
+ fsys, err := fs.Sub(uiFS, "web/dist")
+ if err != nil {
+ panic(err)
+ }
+
+ api := &apiHandler{manager: manager}
+ mux := http.NewServeMux()
+
+ // API routes
+ mux.Handle("/api/tables", enforceJSONHandler(setupCORS(http.HandlerFunc(api.handleGetTables))))
+ mux.Handle("/api/partitions", enforceJSONHandler(setupCORS(http.HandlerFunc(api.handleGetPartitions))))
+
+ // UI routes - serve static files with proper MIME types
+ mux.Handle("/", setupCORS(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Handle API routes
+ if strings.HasPrefix(r.URL.Path, "/api/") {
+ http.NotFound(w, r)
+ return
+ }
+
+ // Serve static files
+ path := r.URL.Path
+ if path == "/" {
+ path = "/index.html"
+ }
+
+ // Remove leading slash for embedded files
+ if strings.HasPrefix(path, "/") {
+ path = path[1:]
+ }
+
+ // Get file from embedded filesystem
+ file, err := fsys.Open(path)
+ if err != nil {
+ // If file not found, serve index.html for SPA routing
+ if path != "index.html" {
+ indexFile, err := fsys.Open("index.html")
+ if err != nil {
+ http.NotFound(w, r)
+ return
+ }
+				defer indexFile.Close()
+				w.Header().Set("Content-Type", "text/html; charset=utf-8")
+				// Serve the embedded index.html directly; http.ServeFile would look
+				// for the file on the OS filesystem rather than in the embedded FS.
+				_, _ = io.Copy(w, indexFile)
+				return
+ }
+ http.NotFound(w, r)
+ return
+ }
+ defer file.Close()
+
+ // Set proper MIME type
+ ext := filepath.Ext(path)
+ mimeType := mime.TypeByExtension(ext)
+ if mimeType == "" {
+ switch ext {
+ case ".js":
+ mimeType = "application/javascript"
+ case ".css":
+ mimeType = "text/css"
+ case ".html":
+ mimeType = "text/html; charset=utf-8"
+ case ".svg":
+ mimeType = "image/svg+xml"
+ case ".ico":
+ mimeType = "image/x-icon"
+ default:
+ mimeType = "application/octet-stream"
+ }
+ }
+ w.Header().Set("Content-Type", mimeType)
+
+ // Serve the file using http.FileServer
+ fileServer := http.FileServer(http.FS(fsys))
+ fileServer.ServeHTTP(w, r)
+ })))
+
+ return mux
+}
+
+// APIHandler returns an http.Handler that serves only the API endpoints
+// This allows users to mount the API on their own router
+func APIHandler(manager *Manager) http.Handler {
+ api := &apiHandler{manager: manager}
+ mux := http.NewServeMux()
+
+ // API routes
+ mux.Handle("/tables", enforceJSONHandler(setupCORS(http.HandlerFunc(api.handleGetTables))))
+ mux.Handle("/partitions", enforceJSONHandler(setupCORS(http.HandlerFunc(api.handleGetPartitions))))
+
+ return mux
+}
+
+// StaticHandler returns an http.Handler that serves only the static UI files
+// This allows users to serve the UI from their own static file server
+func StaticHandler() http.Handler {
+ fsys, err := fs.Sub(uiFS, "web/dist")
+ if err != nil {
+ panic(err)
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ path := r.URL.Path
+ if path == "/" {
+ path = "/index.html"
+ }
+
+ // Remove leading slash for embedded files
+ if strings.HasPrefix(path, "/") {
+ path = path[1:]
+ }
+
+ // Get file from embedded filesystem
+ file, err := fsys.Open(path)
+ if err != nil {
+ // If file not found, serve index.html for SPA routing
+ if path != "index.html" {
+ indexFile, err := fsys.Open("index.html")
+ if err != nil {
+ http.NotFound(w, r)
+ return
+ }
+				defer indexFile.Close()
+				w.Header().Set("Content-Type", "text/html; charset=utf-8")
+				// Serve the embedded index.html directly; http.ServeFile would look
+				// for the file on the OS filesystem rather than in the embedded FS.
+				_, _ = io.Copy(w, indexFile)
+				return
+ }
+ http.NotFound(w, r)
+ return
+ }
+ defer file.Close()
+
+ // Set proper MIME type
+ ext := filepath.Ext(path)
+ mimeType := mime.TypeByExtension(ext)
+ if mimeType == "" {
+ switch ext {
+ case ".js":
+ mimeType = "application/javascript"
+ case ".css":
+ mimeType = "text/css"
+ case ".html":
+ mimeType = "text/html; charset=utf-8"
+ case ".svg":
+ mimeType = "image/svg+xml"
+ case ".ico":
+ mimeType = "image/x-icon"
+ default:
+ mimeType = "application/octet-stream"
+ }
+ }
+ w.Header().Set("Content-Type", mimeType)
+
+ // Serve the file using http.FileServer
+ fileServer := http.FileServer(http.FS(fsys))
+ fileServer.ServeHTTP(w, r)
+ })
+}
+
+func setupCORS(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Set CORS headers
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
+
+ // Handle preflight requests
+ if r.Method == "OPTIONS" {
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
+
+func enforceJSONHandler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ contentType := r.Header.Get("Content-Type")
+
+ if contentType != "" {
+ mt, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ http.Error(w, "Malformed Content-Type header", http.StatusBadRequest)
+ return
+ }
+
+ if mt != "application/json" {
+ http.Error(w, "Content-Type header must be application/json", http.StatusUnsupportedMediaType)
+ return
+ }
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/jirevwe/go_partman/web/dist/index.html b/vendor/github.com/jirevwe/go_partman/web/dist/index.html
new file mode 100644
index 0000000000..48cdce8528
--- /dev/null
+++ b/vendor/github.com/jirevwe/go_partman/web/dist/index.html
@@ -0,0 +1 @@
+placeholder
diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore
new file mode 100644
index 0000000000..b2be23c87f
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.idea
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+tags
+environ
diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE
new file mode 100644
index 0000000000..0d31edfa73
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/LICENSE
@@ -0,0 +1,23 @@
+ Copyright (c) 2013, Jason Moiron
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/jmoiron/sqlx/Makefile b/vendor/github.com/jmoiron/sqlx/Makefile
new file mode 100644
index 0000000000..448b9ddd9c
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/Makefile
@@ -0,0 +1,30 @@
+.ONESHELL:
+SHELL = /bin/sh
+.SHELLFLAGS = -ec
+
+BASE_PACKAGE := github.com/jmoiron/sqlx
+
+tooling:
+ go install honnef.co/go/tools/cmd/staticcheck@v0.4.7
+ go install golang.org/x/vuln/cmd/govulncheck@v1.0.4
+ go install golang.org/x/tools/cmd/goimports@v0.20.0
+
+has-changes:
+ git diff --exit-code --quiet HEAD --
+
+lint:
+ go vet ./...
+ staticcheck -checks=all ./...
+
+fmt:
+ go list -f '{{.Dir}}' ./... | xargs -I {} goimports -local $(BASE_PACKAGE) -w {}
+
+vuln-check:
+ govulncheck ./...
+
+test-race:
+ go test -v -race -count=1 ./...
+
+update-dependencies:
+ go get -u -t -v ./...
+ go mod tidy
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
new file mode 100644
index 0000000000..5bfd231a11
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/README.md
@@ -0,0 +1,213 @@
+# sqlx
+
+[](https://dl.circleci.com/status-badge/redirect/gh/jmoiron/sqlx/tree/master) [](https://coveralls.io/github/jmoiron/sqlx?branch=master) [](https://godoc.org/github.com/jmoiron/sqlx) [](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
+
+sqlx is a library which provides a set of extensions on go's standard
+`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
+et al. all leave the underlying interfaces untouched, so that their interfaces
+are a superset on the standard ones. This makes it relatively painless to
+integrate existing codebases using database/sql with sqlx.
+
+Major additional concepts are:
+
+* Marshal rows into structs (with embedded struct support), maps, and slices
+* Named parameter support including prepared statements
+* `Get` and `Select` to go quickly from query to struct/slice
+
+In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
+there is also some [user documentation](http://jmoiron.github.io/sqlx/) that
+explains how to use `database/sql` along with sqlx.
+
+## Recent Changes
+
+1.3.0:
+
+* `sqlx.DB.Connx(context.Context) *sqlx.Conn`
+* `sqlx.BindDriver(driverName, bindType)`
+* support for `[]map[string]interface{}` to do "batch" insertions
+* allocation & perf improvements for `sqlx.In`
+
+DB.Connx returns an `sqlx.Conn`, which is an `sql.Conn`-alike consistent with
+sqlx's wrapping of other types.
+
+`BindDriver` allows users to control the bindvars that sqlx will use for drivers,
+and add new drivers at runtime. This results in a very slight performance hit
+when resolving the driver into a bind type (~40ns per call), but it allows users
+to specify what bindtype their driver uses even when sqlx has not been updated
+to know about it by default.
+
+### Backwards Compatibility
+
+Compatibility with the most recent two versions of Go is a requirement for any
+new changes. Compatibility beyond that is not guaranteed.
+
+Versioning is done with Go modules. Breaking changes (eg. removing deprecated API)
+will get major version number bumps.
+
+## install
+
+ go get github.com/jmoiron/sqlx
+
+## issues
+
+Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
+`Columns()` does not fully qualify column names in queries like:
+
+```sql
+SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
+```
+
+making a struct or map destination ambiguous. Use `AS` in your queries
+to give columns distinct names, `rows.Scan` to scan them manually, or
+`SliceScan` to get a slice of results.
+
+## usage
+
+Below is an example which shows some common use cases for sqlx. Check
+[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
+usage.
+
+
+```go
+package main
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+
+ _ "github.com/lib/pq"
+ "github.com/jmoiron/sqlx"
+)
+
+var schema = `
+CREATE TABLE person (
+ first_name text,
+ last_name text,
+ email text
+);
+
+CREATE TABLE place (
+ country text,
+ city text NULL,
+ telcode integer
+)`
+
+type Person struct {
+ FirstName string `db:"first_name"`
+ LastName string `db:"last_name"`
+ Email string
+}
+
+type Place struct {
+ Country string
+ City sql.NullString
+ TelCode int
+}
+
+func main() {
+ // this Pings the database trying to connect
+ // use sqlx.Open() for sql.Open() semantics
+ db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // exec the schema or fail; multi-statement Exec behavior varies between
+ // database drivers; pq will exec them all, sqlite3 won't, ymmv
+ db.MustExec(schema)
+
+ tx := db.MustBegin()
+ tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
+ tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
+ tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
+ tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
+ tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
+ // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
+ tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
+ tx.Commit()
+
+ // Query the database, storing results in a []Person (wrapped in []interface{})
+ people := []Person{}
+ db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
+ jason, john := people[0], people[1]
+
+ fmt.Printf("%#v\n%#v", jason, john)
+ // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
+ // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
+
+ // You can also get a single result, a la QueryRow
+ jason = Person{}
+ err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
+ fmt.Printf("%#v\n", jason)
+ // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
+
+ // if you have null fields and use SELECT *, you must use sql.Null* in your struct
+ places := []Place{}
+ err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ usa, singsing, honkers := places[0], places[1], places[2]
+
+ fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
+ // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
+ // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
+ // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
+
+ // Loop through rows using only one struct
+ place := Place{}
+ rows, err := db.Queryx("SELECT * FROM place")
+ for rows.Next() {
+ err := rows.StructScan(&place)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("%#v\n", place)
+ }
+ // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
+ // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
+ // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
+
+ // Named queries, using `:name` as the bindvar. Automatic bindvar support
+ // which takes into account the dbtype based on the driverName on sqlx.Open/Connect
+ _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
+ map[string]interface{}{
+ "first": "Bin",
+ "last": "Smuth",
+ "email": "bensmith@allblacks.nz",
+ })
+
+ // Selects Mr. Smith from the database
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
+
+ // Named queries can also use structs. Their bind names follow the same rules
+ // as the name -> db mapping, so struct fields are lowercased and the `db` tag
+ // is taken into consideration.
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
+
+
+ // batch insert
+
+ // batch insert with structs
+ personStructs := []Person{
+ {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"},
+ {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"},
+ {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"},
+ }
+
+ _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email)
+ VALUES (:first_name, :last_name, :email)`, personStructs)
+
+ // batch insert with maps
+ personMaps := []map[string]interface{}{
+ {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"},
+ {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"},
+ {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"},
+ }
+
+ _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email)
+ VALUES (:first_name, :last_name, :email)`, personMaps)
+}
+```
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
new file mode 100644
index 0000000000..ec0da4e72e
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/bind.go
@@ -0,0 +1,265 @@
+package sqlx
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// Bindvar types supported by Rebind, BindMap and BindStruct.
+const (
+ UNKNOWN = iota
+ QUESTION
+ DOLLAR
+ NAMED
+ AT
+)
+
+var defaultBinds = map[int][]string{
+ DOLLAR: []string{"postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql", "nrpostgres", "cockroach"},
+ QUESTION: []string{"mysql", "sqlite3", "nrmysql", "nrsqlite3"},
+ NAMED: []string{"oci8", "ora", "goracle", "godror"},
+ AT: []string{"sqlserver"},
+}
+
+var binds sync.Map
+
+func init() {
+ for bind, drivers := range defaultBinds {
+ for _, driver := range drivers {
+ BindDriver(driver, bind)
+ }
+ }
+
+}
+
+// BindType returns the bindtype for a given database given a drivername.
+func BindType(driverName string) int {
+ itype, ok := binds.Load(driverName)
+ if !ok {
+ return UNKNOWN
+ }
+ return itype.(int)
+}
+
+// BindDriver sets the BindType for driverName to bindType.
+func BindDriver(driverName string, bindType int) {
+ binds.Store(driverName, bindType)
+}
+
+// FIXME: this should be able to be tolerant of escaped ?'s in queries without
+// losing much speed, and should be to avoid confusion.
+
+// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
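+//
+// Illustrative example (query text is assumed): with the DOLLAR bind type,
+// Rebind(DOLLAR, "SELECT * FROM person WHERE first_name=? AND last_name=?")
+// returns "SELECT * FROM person WHERE first_name=$1 AND last_name=$2".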
+func Rebind(bindType int, query string) string {
+ switch bindType {
+ case QUESTION, UNKNOWN:
+ return query
+ }
+
+ // Add space enough for 10 params before we have to allocate
+ rqb := make([]byte, 0, len(query)+10)
+
+ var i, j int
+
+ for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
+ rqb = append(rqb, query[:i]...)
+
+ switch bindType {
+ case DOLLAR:
+ rqb = append(rqb, '$')
+ case NAMED:
+ rqb = append(rqb, ':', 'a', 'r', 'g')
+ case AT:
+ rqb = append(rqb, '@', 'p')
+ }
+
+ j++
+ rqb = strconv.AppendInt(rqb, int64(j), 10)
+
+ query = query[i+1:]
+ }
+
+ return string(append(rqb, query...))
+}
+
+// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
+// much simpler and should be more resistant to odd unicode, but it is twice as
+// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
+// problems arise with its somewhat naive handling of unicode.
+func rebindBuff(bindType int, query string) string {
+ if bindType != DOLLAR {
+ return query
+ }
+
+ b := make([]byte, 0, len(query))
+ rqb := bytes.NewBuffer(b)
+ j := 1
+ for _, r := range query {
+ if r == '?' {
+ rqb.WriteRune('$')
+ rqb.WriteString(strconv.Itoa(j))
+ j++
+ } else {
+ rqb.WriteRune(r)
+ }
+ }
+
+ return rqb.String()
+}
+
+func asSliceForIn(i interface{}) (v reflect.Value, ok bool) {
+ if i == nil {
+ return reflect.Value{}, false
+ }
+
+ v = reflect.ValueOf(i)
+ t := reflectx.Deref(v.Type())
+
+ // Only expand slices
+ if t.Kind() != reflect.Slice {
+ return reflect.Value{}, false
+ }
+
+ // []byte is a driver.Value type so it should not be expanded
+ if t == reflect.TypeOf([]byte{}) {
+ return reflect.Value{}, false
+
+ }
+
+ return v, true
+}
+
+// In expands slice values in args, returning the modified query string
+// and a new arg list that can be executed by a database. The `query` should
+// use the `?` bindVar. The return value uses the `?` bindVar.
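+//
+// Illustrative example (query text is assumed):
+//
+//	q, args, err := In("SELECT * FROM person WHERE id IN (?)", []int{1, 2, 3})
+//	// q    == "SELECT * FROM person WHERE id IN (?, ?, ?)"
+//	// args == []interface{}{1, 2, 3}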
+func In(query string, args ...interface{}) (string, []interface{}, error) {
+ // argMeta stores reflect.Value and length for slices and
+ // the value itself for non-slice arguments
+ type argMeta struct {
+ v reflect.Value
+ i interface{}
+ length int
+ }
+
+ var flatArgsCount int
+ var anySlices bool
+
+ var stackMeta [32]argMeta
+
+ var meta []argMeta
+ if len(args) <= len(stackMeta) {
+ meta = stackMeta[:len(args)]
+ } else {
+ meta = make([]argMeta, len(args))
+ }
+
+ for i, arg := range args {
+ if a, ok := arg.(driver.Valuer); ok {
+ var err error
+ arg, err = a.Value()
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ if v, ok := asSliceForIn(arg); ok {
+ meta[i].length = v.Len()
+ meta[i].v = v
+
+ anySlices = true
+ flatArgsCount += meta[i].length
+
+ if meta[i].length == 0 {
+ return "", nil, errors.New("empty slice passed to 'in' query")
+ }
+ } else {
+ meta[i].i = arg
+ flatArgsCount++
+ }
+ }
+
+ // don't do any parsing if there aren't any slices; note that this means
+ // some errors that we might have caught below will not be returned.
+ if !anySlices {
+ return query, args, nil
+ }
+
+ newArgs := make([]interface{}, 0, flatArgsCount)
+
+ var buf strings.Builder
+ buf.Grow(len(query) + len(", ?")*flatArgsCount)
+
+ var arg, offset int
+
+ for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
+ if arg >= len(meta) {
+			// if an argument wasn't passed, let's return an error; this is
+ // not actually how database/sql Exec/Query works, but since we are
+ // creating an argument list programmatically, we want to be able
+ // to catch these programmer errors earlier.
+ return "", nil, errors.New("number of bindVars exceeds arguments")
+ }
+
+ argMeta := meta[arg]
+ arg++
+
+ // not a slice, continue.
+ // our questionmark will either be written before the next expansion
+ // of a slice or after the loop when writing the rest of the query
+ if argMeta.length == 0 {
+ offset = offset + i + 1
+ newArgs = append(newArgs, argMeta.i)
+ continue
+ }
+
+ // write everything up to and including our ? character
+ buf.WriteString(query[:offset+i+1])
+
+ for si := 1; si < argMeta.length; si++ {
+ buf.WriteString(", ?")
+ }
+
+ newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
+
+ // slice the query and reset the offset. this avoids some bookkeeping for
+ // the write after the loop
+ query = query[offset+i+1:]
+ offset = 0
+ }
+
+ buf.WriteString(query)
+
+ if arg < len(meta) {
+		return "", nil, errors.New("number of bindVars less than number of arguments")
+ }
+
+ return buf.String(), newArgs, nil
+}
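+
+// Illustrative sketch (not part of upstream sqlx): In is usually followed by a
+// Rebind for the target driver; the table and column here are arbitrary.
+//
+//	q, args, err := sqlx.In("SELECT * FROM users WHERE level IN (?);", []int{4, 6, 7})
+//	// q == "SELECT * FROM users WHERE level IN (?, ?, ?);"
+//	// args == []interface{}{4, 6, 7}
+//	q = db.Rebind(q) // then convert to the driver's bindvar form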
+
+func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
+ switch val := v.Interface().(type) {
+ case []interface{}:
+ args = append(args, val...)
+ case []int:
+ for i := range val {
+ args = append(args, val[i])
+ }
+ case []string:
+ for i := range val {
+ args = append(args, val[i])
+ }
+ default:
+ for si := 0; si < vlen; si++ {
+ args = append(args, v.Index(si).Interface())
+ }
+ }
+
+ return args
+}
diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go
new file mode 100644
index 0000000000..b80104175d
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/doc.go
@@ -0,0 +1,11 @@
+// Package sqlx provides general purpose extensions to database/sql.
+//
+// It is intended to seamlessly wrap database/sql and provide convenience
+// methods which are useful in the development of database driven applications.
+// None of the underlying database/sql methods are changed. Instead all extended
+// behavior is implemented through new methods defined on wrapper types.
+//
+// Additions include scanning into structs, named query support, rebinding
+// queries for different drivers, convenient shorthands for common error handling
+// and more.
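+//
+// A minimal usage sketch (not part of the upstream docs; the driver name, DSN
+// and Person type below are assumptions made for the example):
+//
+//	type Person struct {
+//		FirstName string `db:"first_name"`
+//		LastName  string `db:"last_name"`
+//	}
+//
+//	db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//
+//	var people []Person
+//	if err := db.Select(&people, "SELECT first_name, last_name FROM person"); err != nil {
+//		log.Fatalln(err)
+//	}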
+package sqlx
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
new file mode 100644
index 0000000000..6ac4477713
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named.go
@@ -0,0 +1,458 @@
+package sqlx
+
+// Named Query Support
+//
+// * BindMap - bind query bindvars to map/struct args
+// * NamedExec, NamedQuery - named query w/ struct or map
+// * NamedStmt - a pre-compiled named query which is a prepared statement
+//
+// Internal Interfaces:
+//
+// * compileNamedQuery - rebind a named query, returning a query and list of names
+// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
+//
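+// As an illustrative sketch (not part of the upstream comments), NamedExec
+// accepts either a struct or a map[string]interface{}; the person table and
+// its columns are assumptions made for the example:
+//
+//	_, err := db.NamedExec(
+//		`INSERT INTO person (first_name, last_name) VALUES (:first_name, :last_name)`,
+//		map[string]interface{}{"first_name": "Jane", "last_name": "Doe"})
+//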
+import (
+ "bytes"
+ "database/sql"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "unicode"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// NamedStmt is a prepared statement that executes named queries. Prepare it
+// how you would execute a NamedQuery, but pass in a struct or map when executing.
+type NamedStmt struct {
+ Params []string
+ QueryString string
+ Stmt *Stmt
+}
+
+// Close closes the named statement.
+func (n *NamedStmt) Close() error {
+ return n.Stmt.Close()
+}
+
+// Exec executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.Exec(args...)
+}
+
+// Query executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.Query(args...)
+}
+
+// QueryRow executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRow(arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowx(args...)
+}
+
+// MustExec execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
+ res, err := n.Exec(arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// Queryx using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
+ r, err := n.Query(arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
+ return n.QueryRow(arg)
+}
+
+// Select using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
+ rows, err := n.Queryx(arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// Get using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
+ r := n.QueryRowx(arg)
+ return r.scanAny(dest, false)
+}
+
+// Unsafe creates an unsafe version of the NamedStmt
+func (n *NamedStmt) Unsafe() *NamedStmt {
+ r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
+ r.Stmt.unsafe = true
+ return r
+}
+
+// A union interface of preparer and binder, required to be able to prepare
+// named statements (as the bindtype must be determined).
+type namedPreparer interface {
+ Preparer
+ binder
+}
+
+func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := Preparex(p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+// convertMapStringInterface attempts to convert v to map[string]interface{}.
+// Unlike v.(map[string]interface{}), this function works on named types that
+// are convertible to map[string]interface{} as well.
+func convertMapStringInterface(v interface{}) (map[string]interface{}, bool) {
+ var m map[string]interface{}
+ mtype := reflect.TypeOf(m)
+ t := reflect.TypeOf(v)
+ if !t.ConvertibleTo(mtype) {
+ return nil, false
+ }
+ return reflect.ValueOf(v).Convert(mtype).Interface().(map[string]interface{}), true
+
+}
+
+func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+ if maparg, ok := convertMapStringInterface(arg); ok {
+ return bindMapArgs(names, maparg)
+ }
+ return bindArgs(names, arg, m)
+}
+
+// private interface to generate a list of interfaces from a given struct
+// type, given a list of names to pull out of the struct. Used by public
+// BindStruct interface.
+func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+ arglist := make([]interface{}, 0, len(names))
+
+ // grab the indirected value of arg
+ var v reflect.Value
+ for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
+ v = v.Elem()
+ }
+
+ err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
+ if len(t) == 0 {
+ return fmt.Errorf("could not find name %s in %#v", names[i], arg)
+ }
+
+ val := reflectx.FieldByIndexesReadOnly(v, t)
+ arglist = append(arglist, val.Interface())
+
+ return nil
+ })
+
+ return arglist, err
+}
+
+// like bindArgs, but for maps.
+func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
+ arglist := make([]interface{}, 0, len(names))
+
+ for _, name := range names {
+ val, ok := arg[name]
+ if !ok {
+ return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
+ }
+ arglist = append(arglist, val)
+ }
+ return arglist, nil
+}
+
+// bindStruct binds a named parameter query with fields from a struct argument.
+// The rules for binding field names to parameter names follow the same
+// conventions as for StructScan, including obeying the `db` struct tags.
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ bound, names, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ arglist, err := bindAnyArgs(names, arg, m)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ return bound, arglist, nil
+}
+
+var valuesReg = regexp.MustCompile(`\)\s*(?i)VALUES\s*\(`)
+
+func findMatchingClosingBracketIndex(s string) int {
+ count := 0
+ for i, ch := range s {
+ if ch == '(' {
+ count++
+ }
+ if ch == ')' {
+ count--
+ if count == 0 {
+ return i
+ }
+ }
+ }
+ return 0
+}
+
+func fixBound(bound string, loop int) string {
+ loc := valuesReg.FindStringIndex(bound)
+ // defensive guard when "VALUES (...)" not found
+ if len(loc) < 2 {
+ return bound
+ }
+
+ openingBracketIndex := loc[1] - 1
+ index := findMatchingClosingBracketIndex(bound[openingBracketIndex:])
+ // defensive guard. must have closing bracket
+ if index == 0 {
+ return bound
+ }
+ closingBracketIndex := openingBracketIndex + index + 1
+
+ var buffer bytes.Buffer
+
+ buffer.WriteString(bound[0:closingBracketIndex])
+ for i := 0; i < loop-1; i++ {
+ buffer.WriteString(",")
+ buffer.WriteString(bound[openingBracketIndex:closingBracketIndex])
+ }
+ buffer.WriteString(bound[closingBracketIndex:])
+ return buffer.String()
+}
+
+// bindArray binds a named parameter query with fields from an array or slice of
+// structs argument.
+func bindArray(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ // do the initial binding with QUESTION; if bindType is not question,
+ // we can rebind it at the end.
+ bound, names, err := compileNamedQuery([]byte(query), QUESTION)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+ arrayValue := reflect.ValueOf(arg)
+ arrayLen := arrayValue.Len()
+ if arrayLen == 0 {
+ return "", []interface{}{}, fmt.Errorf("length of array is 0: %#v", arg)
+ }
+ var arglist = make([]interface{}, 0, len(names)*arrayLen)
+ for i := 0; i < arrayLen; i++ {
+ elemArglist, err := bindAnyArgs(names, arrayValue.Index(i).Interface(), m)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+ arglist = append(arglist, elemArglist...)
+ }
+ if arrayLen > 1 {
+ bound = fixBound(bound, arrayLen)
+ }
+ // adjust binding type if we weren't on question
+ if bindType != QUESTION {
+ bound = Rebind(bindType, bound)
+ }
+ return bound, arglist, nil
+}
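+
+// Illustrative sketch (not upstream documentation): when a slice of structs is
+// passed to NamedExec, bindArray repeats the VALUES group once per element; the
+// Person type and table are assumptions made for the example.
+//
+//	people := []Person{{FirstName: "Jane"}, {FirstName: "John"}}
+//	// binds as INSERT INTO person (first_name) VALUES (?),(?) before rebinding
+//	_, err := db.NamedExec(`INSERT INTO person (first_name) VALUES (:first_name)`, people)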
+
+// bindMap binds a named parameter query with a map of arguments.
+func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
+ bound, names, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ arglist, err := bindMapArgs(names, args)
+ return bound, arglist, err
+}
+
+// -- Compilation of Named Queries
+
+// Allow digits and letters in bind params; additionally runes are
+// checked against underscores, meaning that bind params can have be
+// alphanumeric with underscores. Mind the difference between unicode
+// digits and numbers, where '5' is a digit but '五' is not.
+var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
+
+// FIXME: this function isn't safe for unicode named params, as a failing test
+// can testify. This is not a regression but a failure of the original code
+// as well. It should be modified to range over runes in a string rather than
+// bytes, even though this is less convenient and slower. Hopefully the
+// addition of the prepared NamedStmt (which will only do this once) will make
+// up for the slightly slower ad-hoc NamedExec/NamedQuery.
+
+// compile a NamedQuery into an unbound query (using the '?' bindvar) and
+// a list of names.
+func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
+ names = make([]string, 0, 10)
+ rebound := make([]byte, 0, len(qs))
+
+ inName := false
+ last := len(qs) - 1
+ currentVar := 1
+ name := make([]byte, 0, 10)
+
+ for i, b := range qs {
+ // a ':' while we're in a name is an error
+ if b == ':' {
+ // if this is the second ':' in a '::' escape sequence, append a ':'
+ if inName && i > 0 && qs[i-1] == ':' {
+ rebound = append(rebound, ':')
+ inName = false
+ continue
+ } else if inName {
+ err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
+ return query, names, err
+ }
+ inName = true
+ name = []byte{}
+ } else if inName && i > 0 && b == '=' && len(name) == 0 {
+ rebound = append(rebound, ':', '=')
+ inName = false
+ continue
+ // if we're in a name, and this is an allowed character, continue
+ } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
+ // append the byte to the name if we are in a name and not on the last byte
+ name = append(name, b)
+ // if we're in a name and it's not an allowed character, the name is done
+ } else if inName {
+ inName = false
+ // if this is the final byte of the string and it is part of the name, then
+ // make sure to add it to the name
+ if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
+ name = append(name, b)
+ }
+ // add the string representation to the names list
+ names = append(names, string(name))
+ // add a proper bindvar for the bindType
+ switch bindType {
+ // oracle only supports named type bind vars even for positional
+ case NAMED:
+ rebound = append(rebound, ':')
+ rebound = append(rebound, name...)
+ case QUESTION, UNKNOWN:
+ rebound = append(rebound, '?')
+ case DOLLAR:
+ rebound = append(rebound, '$')
+ for _, b := range strconv.Itoa(currentVar) {
+ rebound = append(rebound, byte(b))
+ }
+ currentVar++
+ case AT:
+ rebound = append(rebound, '@', 'p')
+ for _, b := range strconv.Itoa(currentVar) {
+ rebound = append(rebound, byte(b))
+ }
+ currentVar++
+ }
+			// add this byte to the rebound query unless it was part of the name
+ if i != last {
+ rebound = append(rebound, b)
+ } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
+ rebound = append(rebound, b)
+ }
+ } else {
+ // this is a normal byte and should just go onto the rebound query
+ rebound = append(rebound, b)
+ }
+ }
+
+ return string(rebound), names, err
+}
+
+// BindNamed binds a struct or a map to a query with named parameters.
+// DEPRECATED: use sqlx.Named instead of this; it may be removed in the future.
+func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(bindType, query, arg, mapper())
+}
+
+// Named takes a query using named parameters and an argument and
+// returns a new query with a list of args that can be executed by
+// a database. The return value uses the `?` bindvar.
+func Named(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(QUESTION, query, arg, mapper())
+}
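+
+// Illustrative sketch (not upstream documentation) of the rewrite Named
+// performs; the query text and map are arbitrary examples.
+//
+//	q, args, err := sqlx.Named("SELECT * FROM person WHERE first_name = :first",
+//		map[string]interface{}{"first": "Jane"})
+//	// q == "SELECT * FROM person WHERE first_name = ?"
+//	// args == []interface{}{"Jane"}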
+
+func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ t := reflect.TypeOf(arg)
+ k := t.Kind()
+ switch {
+ case k == reflect.Map && t.Key().Kind() == reflect.String:
+ m, ok := convertMapStringInterface(arg)
+ if !ok {
+ return "", nil, fmt.Errorf("sqlx.bindNamedMapper: unsupported map type: %T", arg)
+ }
+ return bindMap(bindType, query, m)
+ case k == reflect.Array || k == reflect.Slice:
+ return bindArray(bindType, query, arg, m)
+ default:
+ return bindStruct(bindType, query, arg, m)
+ }
+}
+
+// NamedQuery binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.Queryx(q, args...)
+}
+
+// NamedExec uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.Exec(q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go
new file mode 100644
index 0000000000..9ad23f4ed1
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named_context.go
@@ -0,0 +1,133 @@
+//go:build go1.8
+// +build go1.8
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+)
+
+// A union interface of contextPreparer and binder, required to be able to
+// prepare named statements with context (as the bindtype must be determined).
+type namedPreparerContext interface {
+ PreparerContext
+ binder
+}
+
+func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := PreparexContext(ctx, p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+// ExecContext executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+ res, err := n.ExecContext(ctx, arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+ r, err := n.QueryContext(ctx, arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+ return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ rows, err := n.QueryxContext(ctx, arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ r := n.QueryRowxContext(ctx, arg)
+ return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.ExecContext(ctx, q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 0000000000..f01d3d1f08
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs. In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* support user-specified name -> field mapping functions
+
+These behaviors mimic those of the standard library marshallers and also the
+behavior of standard Go accessors.
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
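+
+As a rough sketch of typical use (the `Account` type below is hypothetical and
+not part of the package), a mapper is built once and then queried for field
+metadata:
+
+```go
+type Account struct {
+	ID      int64 `db:"id"`
+	OwnerID int64 `db:"owner_id"`
+}
+
+m := reflectx.NewMapperFunc("db", strings.ToLower)
+tm := m.TypeMap(reflect.TypeOf(Account{}))
+fi := tm.GetByPath("owner_id") // *FieldInfo describing the OwnerID field
+```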
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 0000000000..8ec6a13828
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,443 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages. The main Mapper type
+// allows for Go-compatible named attribute access, including accessing embedded
+// struct attributes and the ability to use functions and struct tags to
+// customize field names.
+package reflectx
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// A FieldInfo is metadata for a struct field.
+type FieldInfo struct {
+ Index []int
+ Path string
+ Field reflect.StructField
+ Zero reflect.Value
+ Name string
+ Options map[string]string
+ Embedded bool
+ Children []*FieldInfo
+ Parent *FieldInfo
+}
+
+// A StructMap is an index of field metadata for a struct.
+type StructMap struct {
+ Tree *FieldInfo
+ Index []*FieldInfo
+ Paths map[string]*FieldInfo
+ Names map[string]*FieldInfo
+}
+
+// GetByPath returns a *FieldInfo for a given string path.
+func (f StructMap) GetByPath(path string) *FieldInfo {
+ return f.Paths[path]
+}
+
+// GetByTraversal returns a *FieldInfo for a given integer path. It is
+// analogous to reflect.FieldByIndex, but using the cached traversal
+// rather than re-executing the reflect machinery each time.
+func (f StructMap) GetByTraversal(index []int) *FieldInfo {
+ if len(index) == 0 {
+ return nil
+ }
+
+ tree := f.Tree
+ for _, i := range index {
+ if i >= len(tree.Children) || tree.Children[i] == nil {
+ return nil
+ }
+ tree = tree.Children[i]
+ }
+ return tree
+}
+
+// Mapper is a general purpose mapper of names to struct fields. A Mapper
+// behaves like most marshallers in the standard library, obeying a field tag
+// for name mapping but also providing a basic transform function.
+type Mapper struct {
+ cache map[reflect.Type]*StructMap
+ tagName string
+ tagMapFunc func(string) string
+ mapFunc func(string) string
+ mutex sync.Mutex
+}
+
+// NewMapper returns a new mapper using the tagName as its struct field tag.
+// If tagName is the empty string, it is ignored.
+func NewMapper(tagName string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ }
+}
+
+// NewMapperTagFunc returns a new mapper which contains a mapper for field names
+// AND a mapper for tag values. This is useful for tags like json which can
+// have values like "name,omitempty".
+func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: mapFunc,
+ tagMapFunc: tagMapFunc,
+ }
+}
+
+// NewMapperFunc returns a new mapper which optionally obeys a field tag and
+// a struct field name mapper func given by f. Tags will take precedence, but
+// for any other field, the mapped name will be f(field.Name)
+func NewMapperFunc(tagName string, f func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: f,
+ }
+}
+
+// TypeMap returns a mapping of field strings to int slices representing
+// the traversal down the struct to reach the field.
+func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
+ m.mutex.Lock()
+ mapping, ok := m.cache[t]
+ if !ok {
+ mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
+ m.cache[t] = mapping
+ }
+ m.mutex.Unlock()
+ return mapping
+}
+
+// FieldMap returns the mapper's mapping of field names to reflect values. Panics
+// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
+func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ r := map[string]reflect.Value{}
+ tm := m.TypeMap(v.Type())
+ for tagName, fi := range tm.Names {
+ r[tagName] = FieldByIndexes(v, fi.Index)
+ }
+ return r
+}
+
+// FieldByName returns a field by its mapped name as a reflect.Value.
+// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
+// Returns zero Value if the name is not found.
+func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ fi, ok := tm.Names[name]
+ if !ok {
+ return v
+ }
+ return FieldByIndexes(v, fi.Index)
+}
+
+// FieldsByName returns a slice of values corresponding to the slice of names
+// for the value. Panics if v's Kind is not Struct or v is not Indirectable
+// to a struct Kind. Returns zero Value for each name not found.
+func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ vals := make([]reflect.Value, 0, len(names))
+ for _, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ vals = append(vals, *new(reflect.Value))
+ } else {
+ vals = append(vals, FieldByIndexes(v, fi.Index))
+ }
+ }
+ return vals
+}
+
+// TraversalsByName returns a slice of int slices which represent the struct
+// traversals for each mapped name. Panics if t is not a struct or Indirectable
+// to a struct. Returns empty int slice for each name not found.
+func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
+ r := make([][]int, 0, len(names))
+ m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
+ if i == nil {
+ r = append(r, []int{})
+ } else {
+ r = append(r, i)
+ }
+
+ return nil
+ })
+ return r
+}
+
+// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
+// each name and the struct traversal represented by that name. Panics if t is not
+// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
+func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
+ t = Deref(t)
+ mustBe(t, reflect.Struct)
+ tm := m.TypeMap(t)
+ for i, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ if err := fn(i, nil); err != nil {
+ return err
+ }
+ } else {
+ if err := fn(i, fi.Index); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// FieldByIndexes returns a value for the field given by the struct traversal
+// for the given value.
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ // if this is a pointer and it's nil, allocate a new value and set it
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ alloc := reflect.New(Deref(v.Type()))
+ v.Set(alloc)
+ }
+ if v.Kind() == reflect.Map && v.IsNil() {
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ }
+ return v
+}
+
+// FieldByIndexesReadOnly returns a value for a particular struct traversal,
+// but is not concerned with allocating nil pointers because the value is
+// going to be used for reading and not setting.
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ }
+ return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+ Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+ if k := v.Kind(); k != expected {
+ panic(&reflect.ValueError{Method: methodName(), Kind: k})
+ }
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+type typeQueue struct {
+ t reflect.Type
+ fi *FieldInfo
+ pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
+func apnd(is []int, i int) []int {
+ x := make([]int, len(is)+1)
+ copy(x, is)
+ x[len(x)-1] = i
+ return x
+}
+
+type mapf func(string) string
+
+// parseName parses the tag and the target name for the given field using
+// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
+// field's name to a target name, and tagMapFunc for mapping the tag to
+// a target name.
+func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
+ // first, set the fieldName to the field's name
+ fieldName = field.Name
+ // if a mapFunc is set, use that to override the fieldName
+ if mapFunc != nil {
+ fieldName = mapFunc(fieldName)
+ }
+
+ // if there's no tag to look for, return the field name
+ if tagName == "" {
+ return "", fieldName
+ }
+
+ // if this tag is not set using the normal convention in the tag,
+	// then return the field name. This check is done because, according
+ // to the reflect documentation:
+ // If the tag does not have the conventional format,
+ // the value returned by Get is unspecified.
+ // which doesn't sound great.
+ if !strings.Contains(string(field.Tag), tagName+":") {
+ return "", fieldName
+ }
+
+	// at this point we're fairly sure that we have a tag, so let's pull it out
+ tag = field.Tag.Get(tagName)
+
+ // if we have a mapper function, call it on the whole tag
+ // XXX: this is a change from the old version, which pulled out the name
+ // before the tagMapFunc could be run, but I think this is the right way
+ if tagMapFunc != nil {
+ tag = tagMapFunc(tag)
+ }
+
+ // finally, split the options from the name
+ parts := strings.Split(tag, ",")
+ fieldName = parts[0]
+
+ return tag, fieldName
+}
+
+// parseOptions parses options out of a tag string, skipping the name
+func parseOptions(tag string) map[string]string {
+ parts := strings.Split(tag, ",")
+ options := make(map[string]string, len(parts))
+ if len(parts) > 1 {
+ for _, opt := range parts[1:] {
+ // short circuit potentially expensive split op
+ if strings.Contains(opt, "=") {
+ kv := strings.Split(opt, "=")
+ options[kv[0]] = kv[1]
+ continue
+ }
+ options[opt] = ""
+ }
+ }
+ return options
+}
+
+// getMapping returns a mapping for the t type, using the tagName, mapFunc and
+// tagMapFunc to determine the canonical names of fields.
+func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
+ m := []*FieldInfo{}
+
+ root := &FieldInfo{}
+ queue := []typeQueue{}
+ queue = append(queue, typeQueue{Deref(t), root, ""})
+
+QueueLoop:
+ for len(queue) != 0 {
+ // pop the first item off of the queue
+ tq := queue[0]
+ queue = queue[1:]
+
+ // ignore recursive field
+ for p := tq.fi.Parent; p != nil; p = p.Parent {
+ if tq.fi.Field.Type == p.Field.Type {
+ continue QueueLoop
+ }
+ }
+
+ nChildren := 0
+ if tq.t.Kind() == reflect.Struct {
+ nChildren = tq.t.NumField()
+ }
+ tq.fi.Children = make([]*FieldInfo, nChildren)
+
+ // iterate through all of its fields
+ for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
+
+ f := tq.t.Field(fieldPos)
+
+ // parse the tag and the target name using the mapping options for this field
+ tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
+
+ // if the name is "-", disabled via a tag, skip it
+ if name == "-" {
+ continue
+ }
+
+ fi := FieldInfo{
+ Field: f,
+ Name: name,
+ Zero: reflect.New(f.Type).Elem(),
+ Options: parseOptions(tag),
+ }
+
+ // if the path is empty this path is just the name
+ if tq.pp == "" {
+ fi.Path = fi.Name
+ } else {
+ fi.Path = tq.pp + "." + fi.Name
+ }
+
+ // skip unexported fields
+ if len(f.PkgPath) != 0 && !f.Anonymous {
+ continue
+ }
+
+ // bfs search of anonymous embedded structs
+ if f.Anonymous {
+ pp := tq.pp
+ if tag != "" {
+ pp = fi.Path
+ }
+
+ fi.Embedded = true
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ nChildren := 0
+ ft := Deref(f.Type)
+ if ft.Kind() == reflect.Struct {
+ nChildren = ft.NumField()
+ }
+ fi.Children = make([]*FieldInfo, nChildren)
+ queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
+ } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
+ queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
+ }
+
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ fi.Parent = tq.fi
+ tq.fi.Children[fieldPos] = &fi
+ m = append(m, &fi)
+ }
+ }
+
+ flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
+ for _, fi := range flds.Index {
+ // check if nothing has already been pushed with the same path
+ // sometimes you can choose to override a type using embedded struct
+ fld, ok := flds.Paths[fi.Path]
+ if !ok || fld.Embedded {
+ flds.Paths[fi.Path] = fi
+ if fi.Name != "" && !fi.Embedded {
+ flds.Names[fi.Path] = fi
+ }
+ }
+ }
+
+ return flds
+}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go
new file mode 100644
index 0000000000..8259a4feb6
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/sqlx.go
@@ -0,0 +1,1054 @@
+package sqlx
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// Although the NameMapper is convenient, in practice it should not
+// be relied on except for application code. If you are writing a library
+// that uses sqlx, you should be aware that the name mappings you expect
+// can be overridden by your user's application.
+
+// NameMapper is used to map column names to struct field names. By default,
+// it uses strings.ToLower to lowercase struct field names. It can be set
+// to whatever you want, but it is encouraged to be set before sqlx is used
+// as name-to-field mappings are cached after first use on a type.
+var NameMapper = strings.ToLower
+var origMapper = reflect.ValueOf(NameMapper)
+
+// Rather than creating on init, this is created when necessary so that
+// importers have time to customize the NameMapper.
+var mpr *reflectx.Mapper
+
+// mprMu protects mpr.
+var mprMu sync.Mutex
+
+// mapper returns a valid mapper using the configured NameMapper func.
+func mapper() *reflectx.Mapper {
+ mprMu.Lock()
+ defer mprMu.Unlock()
+
+ if mpr == nil {
+ mpr = reflectx.NewMapperFunc("db", NameMapper)
+ } else if origMapper != reflect.ValueOf(NameMapper) {
+ // if NameMapper has changed, create a new mapper
+ mpr = reflectx.NewMapperFunc("db", NameMapper)
+ origMapper = reflect.ValueOf(NameMapper)
+ }
+ return mpr
+}
+
+// isScannable takes the reflect.Type of a destination and returns
+// whether or not it's Scannable. Something is scannable if:
+// - it is not a struct
+// - it implements sql.Scanner
+// - it has no exported fields
+func isScannable(t reflect.Type) bool {
+ if reflect.PtrTo(t).Implements(_scannerInterface) {
+ return true
+ }
+ if t.Kind() != reflect.Struct {
+ return true
+ }
+
+ // it's not important that we use the right mapper for this particular object,
+	// we're only concerned with how many exported fields this struct has
+ return len(mapper().TypeMap(t).Index) == 0
+}
+
+// ColScanner is an interface used by MapScan and SliceScan
+type ColScanner interface {
+ Columns() ([]string, error)
+ Scan(dest ...interface{}) error
+ Err() error
+}
+
+// Queryer is an interface used by Get and Select
+type Queryer interface {
+ Query(query string, args ...interface{}) (*sql.Rows, error)
+ Queryx(query string, args ...interface{}) (*Rows, error)
+ QueryRowx(query string, args ...interface{}) *Row
+}
+
+// Execer is an interface used by MustExec and LoadFile
+type Execer interface {
+ Exec(query string, args ...interface{}) (sql.Result, error)
+}
+
+// binder is an interface for something which can bind queries (Tx, DB)
+type binder interface {
+ DriverName() string
+ Rebind(string) string
+ BindNamed(string, interface{}) (string, []interface{}, error)
+}
+
+// Ext is a union interface which can bind, query, and exec, used by
+// NamedQuery and NamedExec.
+type Ext interface {
+ binder
+ Queryer
+ Execer
+}
+
+// Preparer is an interface used by Preparex.
+type Preparer interface {
+ Prepare(query string) (*sql.Stmt, error)
+}
+
+// determine if any of our extensions are unsafe
+func isUnsafe(i interface{}) bool {
+ switch v := i.(type) {
+ case Row:
+ return v.unsafe
+ case *Row:
+ return v.unsafe
+ case Rows:
+ return v.unsafe
+ case *Rows:
+ return v.unsafe
+ case NamedStmt:
+ return v.Stmt.unsafe
+ case *NamedStmt:
+ return v.Stmt.unsafe
+ case Stmt:
+ return v.unsafe
+ case *Stmt:
+ return v.unsafe
+ case qStmt:
+ return v.unsafe
+ case *qStmt:
+ return v.unsafe
+ case DB:
+ return v.unsafe
+ case *DB:
+ return v.unsafe
+ case Tx:
+ return v.unsafe
+ case *Tx:
+ return v.unsafe
+ case sql.Rows, *sql.Rows:
+ return false
+ default:
+ return false
+ }
+}
+
+func mapperFor(i interface{}) *reflectx.Mapper {
+ switch i := i.(type) {
+ case DB:
+ return i.Mapper
+ case *DB:
+ return i.Mapper
+ case Tx:
+ return i.Mapper
+ case *Tx:
+ return i.Mapper
+ default:
+ return mapper()
+ }
+}
+
+var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+
+//lint:ignore U1000 ignoring this for now
+var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// Row is a reimplementation of sql.Row in order to gain access to the underlying
+// sql.Rows.Columns() data, necessary for StructScan.
+type Row struct {
+ err error
+ unsafe bool
+ rows *sql.Rows
+ Mapper *reflectx.Mapper
+}
+
+// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
+// underlying error from the internal rows object if it exists.
+func (r *Row) Scan(dest ...interface{}) error {
+ if r.err != nil {
+ return r.err
+ }
+
+ // TODO(bradfitz): for now we need to defensively clone all
+ // []byte that the driver returned (not permitting
+ // *RawBytes in Rows.Scan), since we're about to close
+ // the Rows in our defer, when we return from this function.
+ // the contract with the driver.Next(...) interface is that it
+ // can return slices into read-only temporary memory that's
+ // only valid until the next Scan/Close. But the TODO is that
+ // for a lot of drivers, this copy will be unnecessary. We
+ // should provide an optional interface for drivers to
+ // implement to say, "don't worry, the []bytes that I return
+ // from Next will not be modified again." (for instance, if
+ // they were obtained from the network anyway) But for now we
+ // don't care.
+ defer r.rows.Close()
+ for _, dp := range dest {
+ if _, ok := dp.(*sql.RawBytes); ok {
+ return errors.New("sql: RawBytes isn't allowed on Row.Scan")
+ }
+ }
+
+ if !r.rows.Next() {
+ if err := r.rows.Err(); err != nil {
+ return err
+ }
+ return sql.ErrNoRows
+ }
+ err := r.rows.Scan(dest...)
+ if err != nil {
+ return err
+ }
+ // Make sure the query can be processed to completion with no errors.
+ if err := r.rows.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
+// returned by Row.Scan()
+func (r *Row) Columns() ([]string, error) {
+ if r.err != nil {
+ return []string{}, r.err
+ }
+ return r.rows.Columns()
+}
+
+// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error
+func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) {
+ if r.err != nil {
+ return []*sql.ColumnType{}, r.err
+ }
+ return r.rows.ColumnTypes()
+}
+
+// Err returns the error encountered while scanning.
+func (r *Row) Err() error {
+ return r.err
+}
+
+// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
+// used mostly to automatically bind named queries using the right bindvars.
+type DB struct {
+ *sql.DB
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
+// driverName of the original database is required for named query support.
+//
+//lint:ignore ST1003 changing this would break the package interface.
+func NewDb(db *sql.DB, driverName string) *DB {
+ return &DB{DB: db, driverName: driverName, Mapper: mapper()}
+}
+
+// DriverName returns the driverName passed to the Open function for this DB.
+func (db *DB) DriverName() string {
+ return db.driverName
+}
+
+// Open is the same as sql.Open, but returns an *sqlx.DB instead.
+func Open(driverName, dataSourceName string) (*DB, error) {
+ db, err := sql.Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
+}
+
+// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
+func MustOpen(driverName, dataSourceName string) *DB {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// MapperFunc sets a new mapper for this db using the default sqlx struct tag
+// and the provided mapper function.
+func (db *DB) MapperFunc(mf func(string) string) {
+ db.Mapper = reflectx.NewMapperFunc("db", mf)
+}
+
+// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
+func (db *DB) Rebind(query string) string {
+ return Rebind(BindType(db.driverName), query)
+}
+
+// Unsafe returns a version of DB which will silently succeed at scanning even
+// when columns in the SQL result have no matching fields in the destination struct.
+// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
+// safety behavior.
+func (db *DB) Unsafe() *DB {
+ return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
+}
+
+// BindNamed binds a query using the DB driver's bindvar type.
+func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
+}
+
+// NamedQuery using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
+ return NamedQuery(db, query, arg)
+}
+
+// NamedExec using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
+ return NamedExec(db, query, arg)
+}
+
+// Select using this DB.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
+ return Select(db, dest, query, args...)
+}
+
+// Get using this DB.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
+ return Get(db, dest, query, args...)
+}
+
+// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
+// of an *sql.Tx.
+func (db *DB) MustBegin() *Tx {
+ tx, err := db.Beginx()
+ if err != nil {
+ panic(err)
+ }
+ return tx
+}
+
+// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
+func (db *DB) Beginx() (*Tx, error) {
+ tx, err := db.DB.Begin()
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// Queryx queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := db.DB.Query(query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// QueryRowx queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := db.DB.Query(query, args...)
+ return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
+}
+
+// MustExec (panic) runs MustExec using this database.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
+ return MustExec(db, query, args...)
+}
+
+// Preparex returns an sqlx.Stmt instead of a sql.Stmt
+func (db *DB) Preparex(query string) (*Stmt, error) {
+ return Preparex(db, query)
+}
+
+// PrepareNamed returns an sqlx.NamedStmt
+func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
+ return prepareNamed(db, query)
+}
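+
+// Illustrative sketch (not upstream documentation): a NamedStmt is prepared
+// once and reused; the query and Person type are assumptions for the example.
+//
+//	stmt, err := db.PrepareNamed("SELECT first_name, last_name FROM person WHERE first_name = :fn")
+//	if err != nil {
+//		return err
+//	}
+//	defer stmt.Close()
+//	var p Person
+//	err = stmt.Get(&p, map[string]interface{}{"fn": "Jane"})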
+
+// Conn is a wrapper around sql.Conn with extra functionality
+type Conn struct {
+ *sql.Conn
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// Tx is an sqlx wrapper around sql.Tx with extra functionality
+type Tx struct {
+ *sql.Tx
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// DriverName returns the driverName used by the DB which began this transaction.
+func (tx *Tx) DriverName() string {
+ return tx.driverName
+}
+
+// Rebind a query within a transaction's bindvar type.
+func (tx *Tx) Rebind(query string) string {
+ return Rebind(BindType(tx.driverName), query)
+}
+
+// Unsafe returns a version of Tx which will silently succeed at scanning even
+// when columns in the SQL result have no matching fields in the destination struct.
+func (tx *Tx) Unsafe() *Tx {
+ return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
+}
+
+// BindNamed binds a query within a transaction's bindvar type.
+func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
+}
+
+// NamedQuery within a transaction.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
+ return NamedQuery(tx, query, arg)
+}
+
+// NamedExec a named query within a transaction.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
+ return NamedExec(tx, query, arg)
+}
+
+// Select within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
+ return Select(tx, dest, query, args...)
+}
+
+// Queryx within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := tx.Tx.Query(query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
+}
+
+// QueryRowx within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := tx.Tx.Query(query, args...)
+ return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
+}
+
+// Get within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
+ return Get(tx, dest, query, args...)
+}
+
+// MustExec runs MustExec within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
+ return MustExec(tx, query, args...)
+}
+
+// Preparex a statement within a transaction.
+func (tx *Tx) Preparex(query string) (*Stmt, error) {
+ return Preparex(tx, query)
+}
+
+// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
+// stmt can be either *sql.Stmt or *sqlx.Stmt.
+func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
+ var s *sql.Stmt
+ switch v := stmt.(type) {
+ case Stmt:
+ s = v.Stmt
+ case *Stmt:
+ s = v.Stmt
+ case *sql.Stmt:
+ s = v
+ default:
+ panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
+ }
+ return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
+}
+
+// NamedStmt returns a version of the prepared statement which runs within a transaction.
+func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
+ return &NamedStmt{
+ QueryString: stmt.QueryString,
+ Params: stmt.Params,
+ Stmt: tx.Stmtx(stmt.Stmt),
+ }
+}
+
+// PrepareNamed returns an sqlx.NamedStmt
+func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
+ return prepareNamed(tx, query)
+}
+
+// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
+type Stmt struct {
+ *sql.Stmt
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// Unsafe returns a version of Stmt which will silently succeed at scanning even
+// when columns in the SQL result have no matching fields in the destination struct.
+func (s *Stmt) Unsafe() *Stmt {
+ return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
+}
+
+// Select using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
+ return Select(&qStmt{s}, dest, "", args...)
+}
+
+// Get using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
+ return Get(&qStmt{s}, dest, "", args...)
+}
+
+// MustExec (panic) using this statement. Note that the query portion of the error
+// output will be blank, as Stmt does not expose its query.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) MustExec(args ...interface{}) sql.Result {
+ return MustExec(&qStmt{s}, "", args...)
+}
+
+// QueryRowx using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryRowx(args ...interface{}) *Row {
+ qs := &qStmt{s}
+ return qs.QueryRowx("", args...)
+}
+
+// Queryx using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
+ qs := &qStmt{s}
+ return qs.Queryx("", args...)
+}
+
+// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
+// implementing those interfaces and ignoring the `query` argument.
+type qStmt struct{ *Stmt }
+
+func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
+ return q.Stmt.Query(args...)
+}
+
+func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := q.Stmt.Query(args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := q.Stmt.Query(args...)
+ return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
+ return q.Stmt.Exec(args...)
+}
+
+// Rows is a wrapper around sql.Rows which caches costly reflect operations
+// during a looped StructScan
+type Rows struct {
+ *sql.Rows
+ unsafe bool
+ Mapper *reflectx.Mapper
+ // these fields cache memory use for a rows during iteration w/ structScan
+ started bool
+ fields [][]int
+ values []interface{}
+}
+
+// SliceScan using this Rows.
+func (r *Rows) SliceScan() ([]interface{}, error) {
+ return SliceScan(r)
+}
+
+// MapScan using this Rows.
+func (r *Rows) MapScan(dest map[string]interface{}) error {
+ return MapScan(r, dest)
+}
+
+// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
+// Use this and iterate over Rows manually when the memory load of Select() might be
+// prohibitive. *Rows.StructScan caches the reflect work of matching up column
+// positions to fields to avoid that overhead per scan, which means it is not safe
+// to run StructScan on the same Rows instance with different struct types.
+func (r *Rows) StructScan(dest interface{}) error {
+ v := reflect.ValueOf(dest)
+
+ if v.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+
+ v = v.Elem()
+
+ if !r.started {
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+ m := r.Mapper
+
+ r.fields = m.TraversalsByName(v.Type(), columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(r.fields); err != nil && !r.unsafe {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ r.values = make([]interface{}, len(columns))
+ r.started = true
+ }
+
+ err := fieldsByTraversal(v, r.fields, r.values, true)
+ if err != nil {
+ return err
+ }
+ // scan into the struct field pointers and append to our results
+ err = r.Scan(r.values...)
+ if err != nil {
+ return err
+ }
+ return r.Err()
+}
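+
+// Illustrative sketch (not upstream documentation): StructScan is typically
+// used inside a Next loop with a single destination type; the Person type and
+// query are assumptions for the example.
+//
+//	rows, err := db.Queryx("SELECT first_name, last_name FROM person")
+//	if err != nil {
+//		return err
+//	}
+//	defer rows.Close()
+//	for rows.Next() {
+//		var p Person
+//		if err := rows.StructScan(&p); err != nil {
+//			return err
+//		}
+//	}
+//	return rows.Err()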
+
+// Connect to a database and verify with a ping.
+func Connect(driverName, dataSourceName string) (*DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ err = db.Ping()
+ if err != nil {
+ db.Close()
+ return nil, err
+ }
+ return db, nil
+}
+
+// MustConnect connects to a database and panics on error.
+func MustConnect(driverName, dataSourceName string) *DB {
+ db, err := Connect(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// Preparex prepares a statement.
+func Preparex(p Preparer, query string) (*Stmt, error) {
+ s, err := p.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
+}
+
+// Select executes a query using the provided Queryer, and StructScans each row
+// into dest, which must be a slice. If the slice elements are scannable, then
+// the result set must have only one column. Otherwise, StructScan is used.
+// The *sql.Rows are closed automatically.
+// Any placeholder parameters are replaced with supplied args.
+func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
+ rows, err := q.Queryx(query, args...)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// Get does a QueryRow using the provided Queryer, and scans the resulting row
+// to dest. If dest is scannable, the result must only have one column. Otherwise,
+// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
+ r := q.QueryRowx(query, args...)
+ return r.scanAny(dest, false)
+}
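+
+// A short usage sketch of Select and Get through *sqlx.DB (the Person struct
+// and table below are hypothetical):
+//
+//	var people []Person
+//	if err := db.Select(&people, "SELECT * FROM person WHERE age > $1", 21); err != nil {
+//		return err
+//	}
+//
+//	var count int
+//	if err := db.Get(&count, "SELECT count(*) FROM person"); err != nil {
+//		return err // sql.ErrNoRows when the result set is empty
+//	}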
+
+// LoadFile exec's every statement in a file (as a single call to Exec).
+// LoadFile may return a nil *sql.Result if errors are encountered locating or
+// reading the file at path. LoadFile reads the entire file into memory, so it
+// is not suitable for loading large data dumps, but can be useful for initializing
+// schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and its current driver-specific behavior
+// is deemed at least not complex in its incorrectness.
+func LoadFile(e Execer, path string) (*sql.Result, error) {
+ realpath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ contents, err := ioutil.ReadFile(realpath)
+ if err != nil {
+ return nil, err
+ }
+ res, err := e.Exec(string(contents))
+ return &res, err
+}
+
+// MustExec execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExec(e Execer, query string, args ...interface{}) sql.Result {
+ res, err := e.Exec(query, args...)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// SliceScan using this Rows.
+func (r *Row) SliceScan() ([]interface{}, error) {
+ return SliceScan(r)
+}
+
+// MapScan using this Rows.
+func (r *Row) MapScan(dest map[string]interface{}) error {
+ return MapScan(r, dest)
+}
+
+func (r *Row) scanAny(dest interface{}, structOnly bool) error {
+ if r.err != nil {
+ return r.err
+ }
+ if r.rows == nil {
+ r.err = sql.ErrNoRows
+ return r.err
+ }
+ defer r.rows.Close()
+
+ v := reflect.ValueOf(dest)
+ if v.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+ if v.IsNil() {
+ return errors.New("nil pointer passed to StructScan destination")
+ }
+
+ base := reflectx.Deref(v.Type())
+ scannable := isScannable(base)
+
+ if structOnly && scannable {
+ return structOnlyError(base)
+ }
+
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+
+ if scannable && len(columns) > 1 {
+ return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
+ }
+
+ if scannable {
+ return r.Scan(dest)
+ }
+
+ m := r.Mapper
+
+ fields := m.TraversalsByName(v.Type(), columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(fields); err != nil && !r.unsafe {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ values := make([]interface{}, len(columns))
+
+ err = fieldsByTraversal(v, fields, values, true)
+ if err != nil {
+ return err
+ }
+ // scan into the struct field pointers and append to our results
+ return r.Scan(values...)
+}
+
+// StructScan a single Row into dest.
+func (r *Row) StructScan(dest interface{}) error {
+ return r.scanAny(dest, true)
+}
+
+// SliceScan a row, returning a []interface{} with values similar to MapScan.
+// This function is primarily intended for use where the number of columns
+// is not known. Because you can pass an []interface{} directly to Scan,
+// it's recommended that you do that as it will not have to allocate new
+// slices per row.
+func SliceScan(r ColScanner) ([]interface{}, error) {
+ // ignore r.started, since we needn't use reflect for anything.
+ columns, err := r.Columns()
+ if err != nil {
+ return []interface{}{}, err
+ }
+
+ values := make([]interface{}, len(columns))
+ for i := range values {
+ values[i] = new(interface{})
+ }
+
+ err = r.Scan(values...)
+
+ if err != nil {
+ return values, err
+ }
+
+ for i := range columns {
+ values[i] = *(values[i].(*interface{}))
+ }
+
+ return values, r.Err()
+}
+
+// MapScan scans a single Row into the dest map[string]interface{}.
+// Use this to get results for SQL that might not be under your control
+// (for instance, if you're building an interface for an SQL server that
+// executes SQL from input). Please do not use this as a primary interface!
+// This will modify the map sent to it in place, so reuse the same map with
+// care. Columns which occur more than once in the result will overwrite
+// each other!
+func MapScan(r ColScanner, dest map[string]interface{}) error {
+ // ignore r.started, since we needn't use reflect for anything.
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := make([]interface{}, len(columns))
+ for i := range values {
+ values[i] = new(interface{})
+ }
+
+ err = r.Scan(values...)
+ if err != nil {
+ return err
+ }
+
+ for i, column := range columns {
+ dest[column] = *(values[i].(*interface{}))
+ }
+
+ return r.Err()
+}
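+
+// A brief sketch of MapScan for result sets whose columns are not known in
+// advance (assumes an open *sqlx.DB named db and a caller-supplied query):
+//
+//	rows, err := db.Queryx(query)
+//	if err != nil {
+//		return err
+//	}
+//	defer rows.Close()
+//	for rows.Next() {
+//		row := map[string]interface{}{}
+//		if err := rows.MapScan(row); err != nil {
+//			return err
+//		}
+//		// row maps column names to scanned values; duplicate columns overwrite
+//	}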
+
+type rowsi interface {
+ Close() error
+ Columns() ([]string, error)
+ Err() error
+ Next() bool
+ Scan(...interface{}) error
+}
+
+// structOnlyError returns an error appropriate for type when a non-scannable
+// struct is expected but something else is given
+func structOnlyError(t reflect.Type) error {
+ isStruct := t.Kind() == reflect.Struct
+ isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
+ if !isStruct {
+ return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
+ }
+ if isScanner {
+ return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
+ }
+ return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
+}
+
+// scanAll scans all rows into a destination, which must be a slice of any
+// type. It resets the slice length to zero before appending each element to
+// the slice. If the destination slice type is a Struct, then StructScan will
+// be used on each row. If the destination is some other kind of base type,
+// then each row must only have one column which can scan into that type. This
+// allows you to do something like:
+//
+// rows, _ := db.Query("select id from people;")
+// var ids []int
+// scanAll(rows, &ids, false)
+//
+// and ids will be a list of the id results. I realize that this is a desirable
+// interface to expose to users, but for now it will only be exposed via changes
+// to `Get` and `Select`. The reason that this has been implemented like this is
+// this is the only way to not duplicate reflect work in the new API while
+// maintaining backwards compatibility.
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
+ var v, vp reflect.Value
+
+ value := reflect.ValueOf(dest)
+
+ // json.Unmarshal returns errors for these
+ if value.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+ if value.IsNil() {
+ return errors.New("nil pointer passed to StructScan destination")
+ }
+ direct := reflect.Indirect(value)
+
+ slice, err := baseType(value.Type(), reflect.Slice)
+ if err != nil {
+ return err
+ }
+ direct.SetLen(0)
+
+ isPtr := slice.Elem().Kind() == reflect.Ptr
+ base := reflectx.Deref(slice.Elem())
+ scannable := isScannable(base)
+
+ if structOnly && scannable {
+ return structOnlyError(base)
+ }
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ // if it's a base type make sure it only has 1 column; if not return an error
+ if scannable && len(columns) > 1 {
+ return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
+ }
+
+ if !scannable {
+ var values []interface{}
+ var m *reflectx.Mapper
+
+ switch rows := rows.(type) {
+ case *Rows:
+ m = rows.Mapper
+ default:
+ m = mapper()
+ }
+
+ fields := m.TraversalsByName(base, columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ values = make([]interface{}, len(columns))
+
+ for rows.Next() {
+ // create a new struct type (which returns PtrTo) and indirect it
+ vp = reflect.New(base)
+ v = reflect.Indirect(vp)
+
+ err = fieldsByTraversal(v, fields, values, true)
+ if err != nil {
+ return err
+ }
+
+ // scan into the struct field pointers and append to our results
+ err = rows.Scan(values...)
+ if err != nil {
+ return err
+ }
+
+ if isPtr {
+ direct.Set(reflect.Append(direct, vp))
+ } else {
+ direct.Set(reflect.Append(direct, v))
+ }
+ }
+ } else {
+ for rows.Next() {
+ vp = reflect.New(base)
+ err = rows.Scan(vp.Interface())
+ if err != nil {
+ return err
+ }
+ // append
+ if isPtr {
+ direct.Set(reflect.Append(direct, vp))
+ } else {
+ direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
+ }
+ }
+ }
+
+ return rows.Err()
+}
+
+// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
+// it doesn't really feel like it's named properly. There is an incongruency
+// between this and the way that StructScan (which might better be ScanStruct
+// anyway) works on a rows object.
+
+// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
+// StructScan will scan in the entire rows result, so if you do not want to
+// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
+// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
+func StructScan(rows rowsi, dest interface{}) error {
+ return scanAll(rows, dest, true)
+}
+
+// reflect helpers
+
+func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
+ t = reflectx.Deref(t)
+ if t.Kind() != expected {
+ return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
+ }
+ return t, nil
+}
+
+// fieldsByTraversal fills the values slice with fields from the passed value
+// based on the given traversals. If ptrs is true, it stores addresses instead of values.
+// We write this instead of using FieldsByName to save allocations and map lookups
+// when iterating over many rows. Empty traversals will get an interface pointer.
+// Because of the necessity of requesting ptrs or values, it's considered a bit too
+// specialized for inclusion in reflectx itself.
+func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
+ v = reflect.Indirect(v)
+ if v.Kind() != reflect.Struct {
+ return errors.New("argument not a struct")
+ }
+
+ for i, traversal := range traversals {
+ if len(traversal) == 0 {
+ values[i] = new(interface{})
+ continue
+ }
+ f := reflectx.FieldByIndexes(v, traversal)
+ if ptrs {
+ values[i] = f.Addr().Interface()
+ } else {
+ values[i] = f.Interface()
+ }
+ }
+ return nil
+}
+
+func missingFields(traversals [][]int) (field int, err error) {
+	for i, t := range traversals {
+ if len(t) == 0 {
+ return i, errors.New("missing field")
+ }
+ }
+ return 0, nil
+}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
new file mode 100644
index 0000000000..32621d56d7
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
@@ -0,0 +1,415 @@
+//go:build go1.8
+// +build go1.8
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+)
+
+// ConnectContext to a database and verify with a ping.
+func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return db, err
+ }
+ err = db.PingContext(ctx)
+ return db, err
+}
+
+// QueryerContext is an interface used by GetContext and SelectContext
+type QueryerContext interface {
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+ QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
+ QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
+}
+
+// PreparerContext is an interface used by PreparexContext.
+type PreparerContext interface {
+ PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
+}
+
+// ExecerContext is an interface used by MustExecContext and LoadFileContext
+type ExecerContext interface {
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+}
+
+// ExtContext is a union interface which can bind, query, and exec, with Context
+// used by NamedQueryContext and NamedExecContext.
+type ExtContext interface {
+ binder
+ QueryerContext
+ ExecerContext
+}
+
+// SelectContext executes a query using the provided Queryer, and StructScans
+// each row into dest, which must be a slice. If the slice elements are
+// scannable, then the result set must have only one column. Otherwise,
+// StructScan is used. The *sql.Rows are closed automatically.
+// Any placeholder parameters are replaced with supplied args.
+func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
+ rows, err := q.QueryxContext(ctx, query, args...)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// PreparexContext prepares a statement.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
+ s, err := p.PrepareContext(ctx, query)
+ if err != nil {
+ return nil, err
+ }
+ return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
+}
+
+// GetContext does a QueryRow using the provided Queryer, and scans the
+// resulting row to dest. If dest is scannable, the result must only have one
+// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
+// row.Scan would. Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
+ r := q.QueryRowxContext(ctx, query, args...)
+ return r.scanAny(dest, false)
+}
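+
+// A hedged sketch of the context-aware variants (hypothetical Job struct,
+// open *sqlx.DB named db; the standard context and time packages are assumed):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//
+//	var jobs []Job
+//	if err := db.SelectContext(ctx, &jobs, "SELECT * FROM jobs WHERE state = $1", "pending"); err != nil {
+//		return err
+//	}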
+
+// LoadFileContext exec's every statement in a file (as a single call to Exec).
+// LoadFileContext may return a nil *sql.Result if errors are encountered
+// locating or reading the file at path. LoadFile reads the entire file into
+// memory, so it is not suitable for loading large data dumps, but can be useful
+// for initializing schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and its current driver-specific behavior
+// is deemed at least not complex in its incorrectness.
+func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
+ realpath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ contents, err := ioutil.ReadFile(realpath)
+ if err != nil {
+ return nil, err
+ }
+ res, err := e.ExecContext(ctx, string(contents))
+ return &res, err
+}
+
+// MustExecContext execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
+ res, err := e.ExecContext(ctx, query, args...)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// PrepareNamedContext returns an sqlx.NamedStmt
+func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
+ return prepareNamedContext(ctx, db, query)
+}
+
+// NamedQueryContext using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
+ return NamedQueryContext(ctx, db, query, arg)
+}
+
+// NamedExecContext using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
+ return NamedExecContext(ctx, db, query, arg)
+}
+
+// SelectContext using this DB.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, db, dest, query, args...)
+}
+
+// GetContext using this DB.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, db, dest, query, args...)
+}
+
+// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+ return PreparexContext(ctx, db, query)
+}
+
+// QueryxContext queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := db.DB.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// QueryRowxContext queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := db.DB.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
+}
+
+// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
+// of an *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// MustBeginTx is canceled.
+func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
+ tx, err := db.BeginTxx(ctx, opts)
+ if err != nil {
+ panic(err)
+ }
+ return tx
+}
+
+// MustExecContext (panic) runs MustExec using this database.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, db, query, args...)
+}
+
+// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
+// *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// BeginTxx is canceled.
+func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
+ tx, err := db.DB.BeginTx(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
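+
+// A minimal sketch of BeginTxx (assumes an open *sqlx.DB named db, plus ctx
+// and id variables in scope); the context governs the transaction until it is
+// committed or rolled back:
+//
+//	tx, err := db.BeginTxx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable})
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback() // returns sql.ErrTxDone after a successful Commit; ignored here
+//	if _, err := tx.ExecContext(ctx, "UPDATE jobs SET state = $1 WHERE id = $2", "done", id); err != nil {
+//		return err
+//	}
+//	return tx.Commit()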
+
+// Connx returns an *sqlx.Conn instead of an *sql.Conn.
+func (db *DB) Connx(ctx context.Context) (*Conn, error) {
+ conn, err := db.DB.Conn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Conn{Conn: conn, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, nil
+}
+
+// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
+// *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// BeginTxx is canceled.
+func (c *Conn) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
+ tx, err := c.Conn.BeginTx(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: c.driverName, unsafe: c.unsafe, Mapper: c.Mapper}, err
+}
+
+// SelectContext using this Conn.
+// Any placeholder parameters are replaced with supplied args.
+func (c *Conn) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, c, dest, query, args...)
+}
+
+// GetContext using this Conn.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (c *Conn) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, c, dest, query, args...)
+}
+
+// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func (c *Conn) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+ return PreparexContext(ctx, c, query)
+}
+
+// QueryxContext queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (c *Conn) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := c.Conn.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: c.unsafe, Mapper: c.Mapper}, err
+}
+
+// QueryRowxContext queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (c *Conn) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := c.Conn.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: c.unsafe, Mapper: c.Mapper}
+}
+
+// Rebind a query within a Conn's bindvar type.
+func (c *Conn) Rebind(query string) string {
+ return Rebind(BindType(c.driverName), query)
+}
+
+// StmtxContext returns a version of the prepared statement which runs within a
+// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
+func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
+ var s *sql.Stmt
+ switch v := stmt.(type) {
+ case Stmt:
+ s = v.Stmt
+ case *Stmt:
+ s = v.Stmt
+ case *sql.Stmt:
+ s = v
+ default:
+ panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
+ }
+ return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
+}
+
+// NamedStmtContext returns a version of the prepared statement which runs
+// within a transaction.
+func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
+ return &NamedStmt{
+ QueryString: stmt.QueryString,
+ Params: stmt.Params,
+ Stmt: tx.StmtxContext(ctx, stmt.Stmt),
+ }
+}
+
+// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+ return PreparexContext(ctx, tx, query)
+}
+
+// PrepareNamedContext returns an sqlx.NamedStmt
+func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
+ return prepareNamedContext(ctx, tx, query)
+}
+
+// MustExecContext runs MustExecContext within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, tx, query, args...)
+}
+
+// QueryxContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := tx.Tx.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
+}
+
+// SelectContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, tx, dest, query, args...)
+}
+
+// GetContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, tx, dest, query, args...)
+}
+
+// QueryRowxContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := tx.Tx.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
+}
+
+// NamedExecContext using this Tx.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
+ return NamedExecContext(ctx, tx, query, arg)
+}
+
+// SelectContext using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
+ return SelectContext(ctx, &qStmt{s}, dest, "", args...)
+}
+
+// GetContext using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
+ return GetContext(ctx, &qStmt{s}, dest, "", args...)
+}
+
+// MustExecContext (panic) using this statement. Note that the query portion of
+// the error output will be blank, as Stmt does not expose its query.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, &qStmt{s}, "", args...)
+}
+
+// QueryRowxContext using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
+ qs := &qStmt{s}
+ return qs.QueryRowxContext(ctx, "", args...)
+}
+
+// QueryxContext using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
+ qs := &qStmt{s}
+ return qs.QueryxContext(ctx, "", args...)
+}
+
+func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
+ return q.Stmt.QueryContext(ctx, args...)
+}
+
+func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := q.Stmt.QueryContext(ctx, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := q.Stmt.QueryContext(ctx, args...)
+ return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+ return q.Stmt.ExecContext(ctx, args...)
+}
diff --git a/vendor/github.com/oklog/ulid/v2/.gitignore b/vendor/github.com/oklog/ulid/v2/.gitignore
new file mode 100644
index 0000000000..c92c4d5608
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/.gitignore
@@ -0,0 +1,29 @@
+#### joe made this: http://goel.io/joe
+
+#####=== Go ===#####
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
diff --git a/vendor/github.com/oklog/ulid/v2/AUTHORS.md b/vendor/github.com/oklog/ulid/v2/AUTHORS.md
new file mode 100644
index 0000000000..95581c78b0
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/AUTHORS.md
@@ -0,0 +1,2 @@
+- Peter Bourgon (@peterbourgon)
+- Tomás Senart (@tsenart)
diff --git a/vendor/github.com/oklog/ulid/v2/CHANGELOG.md b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md
new file mode 100644
index 0000000000..8da38c6b00
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/CHANGELOG.md
@@ -0,0 +1,33 @@
+## 1.3.1 / 2018-10-02
+
+* Use underlying entropy source for random increments in Monotonic (#32)
+
+## 1.3.0 / 2018-09-29
+
+* Monotonic entropy support (#31)
+
+## 1.2.0 / 2018-09-09
+
+* Add a function to convert Unix time in milliseconds back to time.Time (#30)
+
+## 1.1.0 / 2018-08-15
+
+* Ensure random part is always read from the entropy reader in full (#28)
+
+## 1.0.0 / 2018-07-29
+
+* Add ParseStrict and MustParseStrict functions (#26)
+* Enforce overflow checking when parsing (#20)
+
+## 0.3.0 / 2017-01-03
+
+* Implement ULID.Compare method
+
+## 0.2.0 / 2016-12-13
+
+* Remove year 2262 Timestamp bug. (#1)
+* Gracefully handle invalid encodings when parsing.
+
+## 0.1.0 / 2016-12-06
+
+* First ULID release
diff --git a/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..68f03f26eb
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+We use GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first propose your ideas
+ in a Github issue. This will avoid unnecessary work and surely give
+ you and us a good deal of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/oklog/ulid/v2/LICENSE b/vendor/github.com/oklog/ulid/v2/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/oklog/ulid/v2/README.md b/vendor/github.com/oklog/ulid/v2/README.md
new file mode 100644
index 0000000000..c0094ce881
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/README.md
@@ -0,0 +1,234 @@
+# Universally Unique Lexicographically Sortable Identifier
+
+[Latest release](https://github.com/oklog/ulid/releases/latest)
+
+[Go Report Card](https://goreportcard.com/report/oklog/ulid)
+[Coverage Status](https://coveralls.io/github/oklog/ulid?branch=master)
+[Go Reference](https://pkg.go.dev/github.com/oklog/ulid/v2)
+[Apache 2 License](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE)
+
+A Go port of [ulid/javascript](https://github.com/ulid/javascript) with binary format implemented.
+
+## Background
+
+A GUID/UUID can be suboptimal for many use-cases because:
+
+- It isn't the most character efficient way of encoding 128 bits
+- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address
+- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures
+- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures
+
+A ULID however:
+
+- Is compatible with UUID/GUID's
+- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact)
+- Lexicographically sortable
+- Canonically encoded as a 26 character string, as opposed to the 36 character UUID
+- Uses Crockford's base32 for better efficiency and readability (5 bits per character)
+- Case insensitive
+- No special characters (URL safe)
+- Monotonic sort order (correctly detects and handles the same millisecond)
+
+## Install
+
+This package requires Go modules.
+
+```shell
+go get github.com/oklog/ulid/v2
+```
+
+## Usage
+
+ULIDs are constructed from two things: a timestamp with millisecond precision,
+and some random data.
+
+Timestamps are modeled as uint64 values representing a Unix time in milliseconds.
+They can be produced by passing a [time.Time](https://pkg.go.dev/time#Time) to
+[ulid.Timestamp](https://pkg.go.dev/github.com/oklog/ulid/v2#Timestamp),
+or by calling [time.Time.UnixMilli](https://pkg.go.dev/time#Time.UnixMilli)
+and converting the returned value to `uint64`.
+
+Random data is taken from a provided [io.Reader](https://pkg.go.dev/io#Reader).
+This design allows for greater flexibility when choosing trade-offs, but can be
+a bit confusing to newcomers.
+
+If you just want to generate a ULID and don't (yet) care about details like
+performance, cryptographic security, monotonicity, etc., use the
+[ulid.Make](https://pkg.go.dev/github.com/oklog/ulid/v2#Make) helper function.
+This function calls [time.Now](https://pkg.go.dev/time#Now) to get a timestamp,
+and uses a source of entropy which is process-global,
+[pseudo-random](https://pkg.go.dev/math/rand), and
+[monotonic](https://pkg.go.dev/oklog/ulid/v2#LockedMonotonicReader).
+
+```go
+fmt.Println(ulid.Make())
+// 01G65Z755AFWAKHE12NY0CQ9FH
+```
+
+More advanced use cases should utilize
+[ulid.New](https://pkg.go.dev/github.com/oklog/ulid/v2#New).
+
+```go
+entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
+ms := ulid.Timestamp(time.Now())
+fmt.Println(ulid.MustNew(ms, entropy))
+// 01G65Z755AFWAKHE12NY0CQ9FH
+```
+
+Care should be taken when providing a source of entropy.
+
+The above example utilizes [math/rand.Rand](https://pkg.go.dev/math/rand#Rand),
+which is not safe for concurrent use by multiple goroutines. Consider
+alternatives such as
+[x/exp/rand](https://pkg.go.dev/golang.org/x/exp/rand#LockedSource).
+Security-sensitive use cases should always use cryptographically secure entropy
+provided by [crypto/rand](https://pkg.go.dev/crypto/rand).
+
+Performance-sensitive use cases should avoid synchronization when generating
+IDs. One option is to use a unique source of entropy for each concurrent
+goroutine, which results in no lock contention, but cannot provide strong
+guarantees about the random data, and does not provide monotonicity within a
+given millisecond. One common performance optimization is to pool sources of
+entropy using a [sync.Pool](https://pkg.go.dev/sync#Pool).
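+
+As a rough sketch of that pooling idea (the `entropyPool` and `newULID` names
+below are ours, not part of this package's API), each caller borrows a
+monotonic source and returns it when done:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/oklog/ulid/v2"
+)
+
+// One entropy source per pooled slot, so concurrent goroutines rarely contend.
+var entropyPool = sync.Pool{
+	New: func() interface{} {
+		return ulid.Monotonic(rand.New(rand.NewSource(time.Now().UnixNano())), 0)
+	},
+}
+
+func newULID() ulid.ULID {
+	e := entropyPool.Get().(io.Reader)
+	defer entropyPool.Put(e)
+	return ulid.MustNew(ulid.Timestamp(time.Now()), e)
+}
+
+func main() {
+	fmt.Println(newULID())
+}
+```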
+
+Monotonicity is a property that says each ULID is "bigger than" the previous
+one. ULIDs are automatically monotonic, but only to millisecond precision. ULIDs
+generated within the same millisecond are ordered by their random component,
+which means they are by default un-ordered. You can use
+[ulid.MonotonicEntropy](https://pkg.go.dev/oklog/ulid/v2#MonotonicEntropy) or
+[ulid.LockedMonotonicEntropy](https://pkg.go.dev/oklog/ulid/v2#LockedMonotonicEntropy)
+to create ULIDs that are monotonic within a given millisecond, with caveats. See
+the documentation for details.
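+
+As a small illustration (here `rand` means `crypto/rand`, and imports of `fmt`
+and `time` are assumed; the variable names are ours), a single `ulid.Monotonic`
+source keeps IDs ordered within one millisecond:
+
+```go
+entropy := ulid.Monotonic(rand.Reader, 0) // crypto/rand.Reader as the base source
+t := ulid.Timestamp(time.Now())
+first := ulid.MustNew(t, entropy)
+second := ulid.MustNew(t, entropy)
+fmt.Println(first, second, first.Compare(second) < 0) // the comparison prints true
+```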
+
+If you don't care about time-based ordering of generated IDs, then there's no
+reason to use ULIDs! There are many other kinds of IDs that are easier, faster,
+smaller, etc. Consider UUIDs.
+
+## Commandline tool
+
+This repo also provides a tool to generate and parse ULIDs at the command line.
+These commands should install the latest version of the tool at `bin/ulid`:
+
+```shell
+cd $(mktemp -d)
+env GOPATH=$(pwd) GO111MODULE=on go get -v github.com/oklog/ulid/v2/cmd/ulid
+```
+
+Usage:
+
+```shell
+Usage: ulid [-hlqz] [-f <format>] [parameters ...]
+ -f, --format=<format> when parsing, show times in this format: default, rfc3339, unix, ms
+ -h, --help print this help text
+ -l, --local when parsing, show local time instead of UTC
+ -q, --quick when generating, use non-crypto-grade entropy
+ -z, --zero when generating, fix entropy to all-zeroes
+```
+
+Examples:
+
+```shell
+$ ulid
+01D78XYFJ1PRM1WPBCBT3VHMNV
+$ ulid -z
+01D78XZ44G0000000000000000
+$ ulid 01D78XZ44G0000000000000000
+Sun Mar 31 03:51:23.536 UTC 2019
+$ ulid --format=rfc3339 --local 01D78XZ44G0000000000000000
+2019-03-31T05:51:23.536+02:00
+```
+
+## Specification
+
+Below is the current specification of ULID as implemented in this repository.
+
+### Components
+
+**Timestamp**
+- 48 bits
+- UNIX-time in milliseconds
+- Won't run out of space till the year 10889 AD
+
+**Entropy**
+- 80 bits
+- User defined entropy source.
+- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic)
+
+### Encoding
+
+[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown.
+This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse.
+
+```
+0123456789ABCDEFGHJKMNPQRSTVWXYZ
+```
+
+### Binary Layout and Byte Order
+
+The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order).
+
+```
+0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_time_high |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 16_bit_uint_time_low | 16_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| 32_bit_uint_random |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+```
+
+### String Representation
+
+```
+ 01AN4Z07BY 79KA1307SR9X4MV3
+|----------| |----------------|
+ Timestamp Entropy
+ 10 chars 16 chars
+ 48bits 80bits
+ base32 base32
+```
+
+## Test
+
+```shell
+go test ./...
+```
+
+## Benchmarks
+
+On an Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1
+
+```
+BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op
+BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op
+BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op
+BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op
+BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op
+BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op
+BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op
+BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op
+BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op
+BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op
+BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op
+BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op
+BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op
+BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op
+BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op
+BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op
+BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op
+BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op
+BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op
+BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op
+```
+
+## Prior Art
+
+- [ulid/javascript](https://github.com/ulid/javascript)
+- [RobThree/NUlid](https://github.com/RobThree/NUlid)
+- [imdario/go-ulid](https://github.com/imdario/go-ulid)
diff --git a/vendor/github.com/oklog/ulid/v2/ulid.go b/vendor/github.com/oklog/ulid/v2/ulid.go
new file mode 100644
index 0000000000..0cb258d431
--- /dev/null
+++ b/vendor/github.com/oklog/ulid/v2/ulid.go
@@ -0,0 +1,696 @@
+// Copyright 2016 The Oklog Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ulid
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "io"
+ "math"
+ "math/bits"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+/*
+An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier
+
+ The components are encoded as 16 octets.
+ Each component is encoded with the MSB first (network byte order).
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 16_bit_uint_time_low | 16_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 32_bit_uint_random |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+type ULID [16]byte
+
+var (
+ // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong
+ // data size.
+ ErrDataSize = errors.New("ulid: bad data size when unmarshaling")
+
+ // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with
+ // invalid Base32 encodings.
+ ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling")
+
+ // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient
+ // size.
+ ErrBufferSize = errors.New("ulid: bad buffer size when marshaling")
+
+ // ErrBigTime is returned when constructing an ULID with a time that is larger
+ // than MaxTime.
+ ErrBigTime = errors.New("ulid: time too big")
+
+ // ErrOverflow is returned when unmarshaling a ULID whose first character is
+ // larger than 7, thereby exceeding the valid bit depth of 128.
+ ErrOverflow = errors.New("ulid: overflow when unmarshaling")
+
+ // ErrMonotonicOverflow is returned by a Monotonic entropy source when
+ // incrementing the previous ULID's entropy bytes would result in overflow.
+ ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow")
+
+ // ErrScanValue is returned when the value passed to scan cannot be unmarshaled
+ // into the ULID.
+ ErrScanValue = errors.New("ulid: source value must be a string or byte slice")
+)
+
+// MonotonicReader is an interface that should yield monotonically increasing
+// entropy into the provided slice for all calls with the same ms parameter. If
+// a MonotonicReader is provided to the New constructor, its MonotonicRead
+// method will be used instead of Read.
+type MonotonicReader interface {
+ io.Reader
+ MonotonicRead(ms uint64, p []byte) error
+}
+
+// New returns an ULID with the given Unix milliseconds timestamp and an
+// optional entropy source. Use the Timestamp function to convert
+// a time.Time to Unix milliseconds.
+//
+// ErrBigTime is returned when passing a timestamp bigger than MaxTime.
+// Reading from the entropy source may also return an error.
+//
+// Safety for concurrent use is only dependent on the safety of the
+// entropy source.
+func New(ms uint64, entropy io.Reader) (id ULID, err error) {
+ if err = id.SetTime(ms); err != nil {
+ return id, err
+ }
+
+ switch e := entropy.(type) {
+ case nil:
+ return id, err
+ case MonotonicReader:
+ err = e.MonotonicRead(ms, id[6:])
+ default:
+ _, err = io.ReadFull(e, id[6:])
+ }
+
+ return id, err
+}
+
+// MustNew is a convenience function equivalent to New that panics on failure
+// instead of returning an error.
+func MustNew(ms uint64, entropy io.Reader) ULID {
+ id, err := New(ms, entropy)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+var (
+ entropy io.Reader
+ entropyOnce sync.Once
+)
+
+// DefaultEntropy returns a thread-safe, per-process, monotonically increasing
+// entropy source.
+func DefaultEntropy() io.Reader {
+ entropyOnce.Do(func() {
+ rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+ entropy = &LockedMonotonicReader{
+ MonotonicReader: Monotonic(rng, 0),
+ }
+ })
+ return entropy
+}
+
+// Make returns an ULID with the current time in Unix milliseconds and
+// monotonically increasing entropy for the same millisecond.
+// It is safe for concurrent use: the shared DefaultEntropy source serializes
+// access behind a mutex (see LockedMonotonicReader).
+func Make() (id ULID) {
+ // NOTE: MustNew can't panic since DefaultEntropy never returns an error.
+ return MustNew(Now(), DefaultEntropy())
+}
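+
+// Illustrative sketch (not part of the upstream oklog/ulid source; the
+// function name is hypothetical): the two usual ways a caller constructs
+// ULIDs, either Make for the common case, or New with an explicit timestamp
+// and a caller-owned entropy source.
+func exampleNewUsage() (ULID, ULID, error) {
+	// Simplest path: current time plus the package's shared monotonic entropy.
+	a := Make()
+
+	// Explicit path: caller-chosen timestamp and entropy source.
+	entropy := rand.New(rand.NewSource(time.Now().UnixNano()))
+	b, err := New(Timestamp(time.Now()), entropy)
+	return a, b, err
+}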
+
+// Parse parses an encoded ULID, returning an error in case of failure.
+//
+// ErrDataSize is returned if the len(ulid) is different from an encoded
+// ULID's length. Invalid encodings produce undefined ULIDs. For a version that
+// returns an error instead, see ParseStrict.
+func Parse(ulid string) (id ULID, err error) {
+ return id, parse([]byte(ulid), false, &id)
+}
+
+// ParseStrict parses an encoded ULID, returning an error in case of failure.
+//
+// It is like Parse, but additionally validates that the parsed ULID consists
+// only of valid base32 characters. It is slightly slower than Parse.
+//
+// ErrDataSize is returned if the len(ulid) is different from an encoded
+// ULID's length. Invalid encodings return ErrInvalidCharacters.
+func ParseStrict(ulid string) (id ULID, err error) {
+ return id, parse([]byte(ulid), true, &id)
+}
+
+func parse(v []byte, strict bool, id *ULID) error {
+ // Check if a base32 encoded ULID is the right length.
+ if len(v) != EncodedSize {
+ return ErrDataSize
+ }
+
+ // Check if all the characters in a base32 encoded ULID are part of the
+ // expected base32 character set.
+ if strict &&
+ (dec[v[0]] == 0xFF ||
+ dec[v[1]] == 0xFF ||
+ dec[v[2]] == 0xFF ||
+ dec[v[3]] == 0xFF ||
+ dec[v[4]] == 0xFF ||
+ dec[v[5]] == 0xFF ||
+ dec[v[6]] == 0xFF ||
+ dec[v[7]] == 0xFF ||
+ dec[v[8]] == 0xFF ||
+ dec[v[9]] == 0xFF ||
+ dec[v[10]] == 0xFF ||
+ dec[v[11]] == 0xFF ||
+ dec[v[12]] == 0xFF ||
+ dec[v[13]] == 0xFF ||
+ dec[v[14]] == 0xFF ||
+ dec[v[15]] == 0xFF ||
+ dec[v[16]] == 0xFF ||
+ dec[v[17]] == 0xFF ||
+ dec[v[18]] == 0xFF ||
+ dec[v[19]] == 0xFF ||
+ dec[v[20]] == 0xFF ||
+ dec[v[21]] == 0xFF ||
+ dec[v[22]] == 0xFF ||
+ dec[v[23]] == 0xFF ||
+ dec[v[24]] == 0xFF ||
+ dec[v[25]] == 0xFF) {
+ return ErrInvalidCharacters
+ }
+
+ // Check if the first character in a base32 encoded ULID will overflow. This
+ // happens because the base32 representation encodes 130 bits, while the
+ // ULID is only 128 bits.
+ //
+ // See https://github.com/oklog/ulid/issues/9 for details.
+ if v[0] > '7' {
+ return ErrOverflow
+ }
+
+ // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid)
+ // to decode a base32 ULID.
+
+ // 6 bytes timestamp (48 bits)
+ (*id)[0] = (dec[v[0]] << 5) | dec[v[1]]
+ (*id)[1] = (dec[v[2]] << 3) | (dec[v[3]] >> 2)
+ (*id)[2] = (dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)
+ (*id)[3] = (dec[v[5]] << 4) | (dec[v[6]] >> 1)
+ (*id)[4] = (dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)
+ (*id)[5] = (dec[v[8]] << 5) | dec[v[9]]
+
+ // 10 bytes of entropy (80 bits)
+ (*id)[6] = (dec[v[10]] << 3) | (dec[v[11]] >> 2)
+ (*id)[7] = (dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)
+ (*id)[8] = (dec[v[13]] << 4) | (dec[v[14]] >> 1)
+ (*id)[9] = (dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)
+ (*id)[10] = (dec[v[16]] << 5) | dec[v[17]]
+	(*id)[11] = (dec[v[18]] << 3) | (dec[v[19]] >> 2)
+ (*id)[12] = (dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)
+ (*id)[13] = (dec[v[21]] << 4) | (dec[v[22]] >> 1)
+ (*id)[14] = (dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)
+ (*id)[15] = (dec[v[24]] << 5) | dec[v[25]]
+
+ return nil
+}
+
+// MustParse is a convenience function equivalent to Parse that panics on failure
+// instead of returning an error.
+func MustParse(ulid string) ULID {
+ id, err := Parse(ulid)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// MustParseStrict is a convenience function equivalent to ParseStrict that
+// panics on failure instead of returning an error.
+func MustParseStrict(ulid string) ULID {
+ id, err := ParseStrict(ulid)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
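+
+// Illustrative sketch (not part of the upstream oklog/ulid source; the
+// function name is hypothetical): String and ParseStrict round-trip a valid
+// ULID, and ParseStrict rejects characters outside the Crockford base32
+// alphabet that Parse would silently decode into an undefined ULID.
+func exampleParseRoundTrip(id ULID) error {
+	parsed, err := ParseStrict(id.String())
+	if err != nil {
+		return err
+	}
+	if parsed.Compare(id) != 0 {
+		return errors.New("round trip mismatch")
+	}
+	// 'U' is not in the Encoding alphabet, so strict parsing must fail.
+	invalid := id.String()[:EncodedSize-1] + "U"
+	if _, err := ParseStrict(invalid); err != ErrInvalidCharacters {
+		return errors.New("expected ErrInvalidCharacters")
+	}
+	return nil
+}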
+
+// Bytes returns bytes slice representation of ULID.
+func (id ULID) Bytes() []byte {
+ return id[:]
+}
+
+// String returns a lexicographically sortable string encoded ULID
+// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3.
+// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy.
+func (id ULID) String() string {
+ ulid := make([]byte, EncodedSize)
+ _ = id.MarshalTextTo(ulid)
+ return string(ulid)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface by
+// returning the ULID as a byte slice.
+func (id ULID) MarshalBinary() ([]byte, error) {
+ ulid := make([]byte, len(id))
+ return ulid, id.MarshalBinaryTo(ulid)
+}
+
+// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer.
+// ErrBufferSize is returned when the len(dst) != 16.
+func (id ULID) MarshalBinaryTo(dst []byte) error {
+ if len(dst) != len(id) {
+ return ErrBufferSize
+ }
+
+ copy(dst, id[:])
+ return nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by
+// copying the passed data and converting it to an ULID. ErrDataSize is
+// returned if the data length is different from ULID length.
+func (id *ULID) UnmarshalBinary(data []byte) error {
+ if len(data) != len(*id) {
+ return ErrDataSize
+ }
+
+ copy((*id)[:], data)
+ return nil
+}
+
+// Encoding is the base 32 encoding alphabet used in ULID strings.
+const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
+
+// MarshalText implements the encoding.TextMarshaler interface by
+// returning the string encoded ULID.
+func (id ULID) MarshalText() ([]byte, error) {
+ ulid := make([]byte, EncodedSize)
+ return ulid, id.MarshalTextTo(ulid)
+}
+
+// MarshalTextTo writes the ULID as a string to the given buffer.
+// ErrBufferSize is returned when the len(dst) != 26.
+func (id ULID) MarshalTextTo(dst []byte) error {
+ // Optimized unrolled loop ahead.
+ // From https://github.com/RobThree/NUlid
+
+ if len(dst) != EncodedSize {
+ return ErrBufferSize
+ }
+
+	// 10 characters encoding the 6-byte (48-bit) timestamp
+ dst[0] = Encoding[(id[0]&224)>>5]
+ dst[1] = Encoding[id[0]&31]
+ dst[2] = Encoding[(id[1]&248)>>3]
+ dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)]
+ dst[4] = Encoding[(id[2]&62)>>1]
+ dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)]
+ dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)]
+ dst[7] = Encoding[(id[4]&124)>>2]
+ dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)]
+ dst[9] = Encoding[id[5]&31]
+
+	// 16 characters encoding the 10 bytes (80 bits) of entropy
+ dst[10] = Encoding[(id[6]&248)>>3]
+ dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)]
+ dst[12] = Encoding[(id[7]&62)>>1]
+ dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)]
+ dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)]
+ dst[15] = Encoding[(id[9]&124)>>2]
+ dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)]
+ dst[17] = Encoding[id[10]&31]
+ dst[18] = Encoding[(id[11]&248)>>3]
+ dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)]
+ dst[20] = Encoding[(id[12]&62)>>1]
+ dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)]
+ dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)]
+ dst[23] = Encoding[(id[14]&124)>>2]
+ dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)]
+ dst[25] = Encoding[id[15]&31]
+
+ return nil
+}
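+
+// Illustrative sketch (not part of the upstream oklog/ulid source; the
+// function name is hypothetical): MarshalTextTo writes into a caller-owned
+// buffer, avoiding the allocation done by String, and UnmarshalText recovers
+// the same ULID from that text.
+func exampleTextRoundTrip(id ULID) (ULID, error) {
+	var buf [EncodedSize]byte
+	if err := id.MarshalTextTo(buf[:]); err != nil {
+		return ULID{}, err
+	}
+	var out ULID
+	err := out.UnmarshalText(buf[:])
+	return out, err
+}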
+
+// Byte to index table for O(1) lookups when unmarshaling.
+// We use 0xFF as a sentinel value for invalid indexes.
+var dec = [...]byte{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
+ 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
+ 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
+ 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
+ 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
+ 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+}
+
+// EncodedSize is the length of a text encoded ULID.
+const EncodedSize = 26
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface by
+// parsing the data as string encoded ULID.
+//
+// ErrDataSize is returned if the len(v) is different from an encoded
+// ULID's length. Invalid encodings produce undefined ULIDs.
+func (id *ULID) UnmarshalText(v []byte) error {
+ return parse(v, false, id)
+}
+
+// Time returns the Unix time in milliseconds encoded in the ULID.
+// Use the top level Time function to convert the returned value to
+// a time.Time.
+func (id ULID) Time() uint64 {
+ return uint64(id[5]) | uint64(id[4])<<8 |
+ uint64(id[3])<<16 | uint64(id[2])<<24 |
+ uint64(id[1])<<32 | uint64(id[0])<<40
+}
+
+// maxTime is the maximum Unix time in milliseconds that can be
+// represented in an ULID.
+var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time()
+
+// MaxTime returns the maximum Unix time in milliseconds that
+// can be encoded in an ULID.
+func MaxTime() uint64 { return maxTime }
+
+// Now is a convenience function that returns the current
+// UTC time in Unix milliseconds. Equivalent to:
+// Timestamp(time.Now().UTC())
+func Now() uint64 { return Timestamp(time.Now().UTC()) }
+
+// Timestamp converts a time.Time to Unix milliseconds.
+//
+// Because of the way ULID stores time, times from the year
+// 10889 onward produce undefined results.
+func Timestamp(t time.Time) uint64 {
+ return uint64(t.Unix())*1000 +
+ uint64(t.Nanosecond()/int(time.Millisecond))
+}
+
+// Time converts Unix milliseconds in the format
+// returned by the Timestamp function to a time.Time.
+func Time(ms uint64) time.Time {
+ s := int64(ms / 1e3)
+ ns := int64((ms % 1e3) * 1e6)
+ return time.Unix(s, ns)
+}
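+
+// Illustrative sketch (not part of the upstream oklog/ulid source; the
+// function name is hypothetical): how the timestamp helpers fit together.
+// Timestamp truncates a time.Time to milliseconds, SetTime stores them in the
+// first 6 bytes, and ULID.Time/Time recover them.
+func exampleTimeRoundTrip(t time.Time) (time.Time, error) {
+	var id ULID
+	if err := id.SetTime(Timestamp(t)); err != nil {
+		return time.Time{}, err // ErrBigTime for timestamps beyond MaxTime
+	}
+	// id.Time() returns the stored milliseconds; Time converts them back to a
+	// time.Time with sub-millisecond precision discarded.
+	return Time(id.Time()), nil
+}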
+
+// SetTime sets the time component of the ULID to the given Unix time
+// in milliseconds.
+func (id *ULID) SetTime(ms uint64) error {
+ if ms > maxTime {
+ return ErrBigTime
+ }
+
+ (*id)[0] = byte(ms >> 40)
+ (*id)[1] = byte(ms >> 32)
+ (*id)[2] = byte(ms >> 24)
+ (*id)[3] = byte(ms >> 16)
+ (*id)[4] = byte(ms >> 8)
+ (*id)[5] = byte(ms)
+
+ return nil
+}
+
+// Entropy returns the entropy from the ULID.
+func (id ULID) Entropy() []byte {
+ e := make([]byte, 10)
+ copy(e, id[6:])
+ return e
+}
+
+// SetEntropy sets the ULID entropy to the passed byte slice.
+// ErrDataSize is returned if len(e) != 10.
+func (id *ULID) SetEntropy(e []byte) error {
+ if len(e) != 10 {
+ return ErrDataSize
+ }
+
+ copy((*id)[6:], e)
+ return nil
+}
+
+// Compare returns an integer comparing id and other lexicographically.
+// The result will be 0 if id==other, -1 if id < other, and +1 if id > other.
+func (id ULID) Compare(other ULID) int {
+ return bytes.Compare(id[:], other[:])
+}
+
+// Scan implements the sql.Scanner interface. It supports scanning
+// a string or byte slice.
+func (id *ULID) Scan(src interface{}) error {
+ switch x := src.(type) {
+ case nil:
+ return nil
+ case string:
+ return id.UnmarshalText([]byte(x))
+ case []byte:
+ return id.UnmarshalBinary(x)
+ }
+
+ return ErrScanValue
+}
+
+// Value implements the sql/driver.Valuer interface, returning the ULID as a
+// slice of bytes by invoking MarshalBinary. If your use case requires a string
+// representation instead, you can create a wrapper type that calls String().
+//
+// type stringValuer ulid.ULID
+//
+// func (v stringValuer) Value() (driver.Value, error) {
+// return ulid.ULID(v).String(), nil
+// }
+//
+// // Example usage.
+// db.Exec("...", stringValuer(id))
+//
+// All valid ULIDs, including zero-value ULIDs, return a valid Value with a nil
+// error. If your use case requires zero-value ULIDs to return a non-nil error,
+// you can create a wrapper type that special-cases this behavior.
+//
+// var zeroValueULID ulid.ULID
+//
+// type invalidZeroValuer ulid.ULID
+//
+// func (v invalidZeroValuer) Value() (driver.Value, error) {
+// if ulid.ULID(v).Compare(zeroValueULID) == 0 {
+// return nil, fmt.Errorf("zero value")
+// }
+// return ulid.ULID(v).Value()
+// }
+//
+// // Example usage.
+// db.Exec("...", invalidZeroValuer(id))
+//
+func (id ULID) Value() (driver.Value, error) {
+ return id.MarshalBinary()
+}
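+
+// Illustrative sketch (not part of the upstream oklog/ulid source; the
+// function name is hypothetical): the default database round trip. Value
+// produces a 16-byte slice and Scan accepts either that slice or the
+// 26-character string form.
+func exampleSQLRoundTrip(id ULID) (ULID, error) {
+	v, err := id.Value() // driver.Value holding the MarshalBinary bytes
+	if err != nil {
+		return ULID{}, err
+	}
+	var out ULID
+	err = out.Scan(v) // a string such as id.String() would also be accepted
+	return out, err
+}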
+
+// Monotonic returns an entropy source that is guaranteed to yield
+// strictly increasing entropy bytes for the same ULID timestamp.
+// On conflicts, the previous ULID entropy is incremented with a
+// random number between 1 and `inc` (inclusive).
+//
+// The provided entropy source must actually yield random bytes or else
+// monotonic reads are not guaranteed to terminate, since there isn't
+// enough randomness to compute an increment number.
+//
+// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`.
+// The lower the value of `inc`, the easier the next ULID within the
+// same millisecond is to guess. If your code depends on ULIDs having
+// secure entropy bytes, then don't go under this default unless you know
+// what you're doing.
+//
+// The returned type isn't safe for concurrent use.
+func Monotonic(entropy io.Reader, inc uint64) *MonotonicEntropy {
+ m := MonotonicEntropy{
+ Reader: bufio.NewReader(entropy),
+ inc: inc,
+ }
+
+ if m.inc == 0 {
+ m.inc = math.MaxUint32
+ }
+
+ if rng, ok := entropy.(rng); ok {
+ m.rng = rng
+ }
+
+ return &m
+}
+
+type rng interface{ Int63n(n int64) int64 }
+
+// LockedMonotonicReader wraps a MonotonicReader with a sync.Mutex for
+// safe concurrent use.
+type LockedMonotonicReader struct {
+ mu sync.Mutex
+ MonotonicReader
+}
+
+// MonotonicRead synchronizes calls to the wrapped MonotonicReader.
+func (r *LockedMonotonicReader) MonotonicRead(ms uint64, p []byte) (err error) {
+ r.mu.Lock()
+ err = r.MonotonicReader.MonotonicRead(ms, p)
+ r.mu.Unlock()
+ return err
+}
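+
+// Illustrative sketch (not part of the upstream oklog/ulid source; the
+// function name is hypothetical): pairing Monotonic with a math/rand source,
+// wrapping it in LockedMonotonicReader for concurrent use, and minting ULIDs
+// that stay strictly increasing within a single millisecond.
+func exampleMonotonicUsage() ([]ULID, error) {
+	entropy := &LockedMonotonicReader{
+		MonotonicReader: Monotonic(rand.New(rand.NewSource(time.Now().UnixNano())), 0),
+	}
+
+	ms := Now()
+	ids := make([]ULID, 0, 3)
+	for i := 0; i < 3; i++ {
+		id, err := New(ms, entropy) // same ms: entropy bytes are incremented each call
+		if err != nil {
+			return nil, err // e.g. ErrMonotonicOverflow if the entropy space is exhausted
+		}
+		ids = append(ids, id)
+	}
+	// ids[0].Compare(ids[1]) < 0 and ids[1].Compare(ids[2]) < 0 hold here.
+	return ids, nil
+}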
+
+// MonotonicEntropy is an opaque type that provides monotonic entropy.
+type MonotonicEntropy struct {
+ io.Reader
+ ms uint64
+ inc uint64
+ entropy uint80
+ rand [8]byte
+ rng rng
+}
+
+// MonotonicRead implements the MonotonicReader interface.
+func (m *MonotonicEntropy) MonotonicRead(ms uint64, entropy []byte) (err error) {
+ if !m.entropy.IsZero() && m.ms == ms {
+ err = m.increment()
+ m.entropy.AppendTo(entropy)
+ } else if _, err = io.ReadFull(m.Reader, entropy); err == nil {
+ m.ms = ms
+ m.entropy.SetBytes(entropy)
+ }
+ return err
+}
+
+// increment the previous entropy number with a random number
+// of up to m.inc (inclusive).
+func (m *MonotonicEntropy) increment() error {
+ if inc, err := m.random(); err != nil {
+ return err
+ } else if m.entropy.Add(inc) {
+ return ErrMonotonicOverflow
+ }
+ return nil
+}
+
+// random returns a uniform random value in [1, m.inc), reading entropy
+// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1.
+// Adapted from: https://golang.org/pkg/crypto/rand/#Int
+func (m *MonotonicEntropy) random() (inc uint64, err error) {
+ if m.inc <= 1 {
+ return 1, nil
+ }
+
+	// Fast path for using an underlying rand.Rand directly.
+	if m.rng != nil {
+		// Int63n yields [0, m.inc), so the result is in [1, m.inc].
+ return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil
+ }
+
+ // bitLen is the maximum bit length needed to encode a value < m.inc.
+ bitLen := bits.Len64(m.inc)
+
+ // byteLen is the maximum byte length needed to encode a value < m.inc.
+ byteLen := uint(bitLen+7) / 8
+
+ // msbitLen is the number of bits in the most significant byte of m.inc-1.
+ msbitLen := uint(bitLen % 8)
+ if msbitLen == 0 {
+ msbitLen = 8
+ }
+
+ for inc == 0 || inc >= m.inc {
+ if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil {
+ return 0, err
+ }
+
+ // Clear bits in the first byte to increase the probability
+ // that the candidate is < m.inc.
+ m.rand[0] &= uint8(int(1<